diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2d3c152fedc8b..f282aa8b7a819 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,33 +1,170 @@
----
-defaults:
-  defaults: &defaults
+version: 2.1
+orbs:
+  win: circleci/windows@2.4.0
+  aws-cli: circleci/aws-cli@1.4.0
+
+executors:
+  go-1_17:
     working_directory: '/go/src/github.com/influxdata/telegraf'
+    docker:
+      - image: 'quay.io/influxdb/telegraf-ci:1.17.3'
     environment:
       GOFLAGS: -p=8
-  go-1_14: &go-1_14
-    docker:
-      - image: 'quay.io/influxdb/telegraf-ci:1.14.9'
-  go-1_15: &go-1_15
-    docker:
-      - image: 'quay.io/influxdb/telegraf-ci:1.15.2'
-  mac: &mac
+  mac:
     macos:
-      xcode: 11.3.1
+      xcode: 12.4.0
     working_directory: '~/go/src/github.com/influxdata/telegraf'
     environment:
       HOMEBREW_NO_AUTO_UPDATE: 1
       GOFLAGS: -p=8
 
-version: 2
+commands:
+  generate-config:
+    parameters:
+      os:
+        type: string
+        default: "linux"
+    steps:
+      - checkout
+      - attach_workspace:
+          at: '/build'
+      - run: ./scripts/generate_config.sh << parameters.os >>
+      - store_artifacts:
+          path: './new-config'
+          destination: 'new-config'
+      - persist_to_workspace:
+          root: './new-config'
+          paths:
+            - '*'
+  check-changed-files-or-halt:
+    steps:
+      - run: ./scripts/check-file-changes.sh
+  test-go:
+    parameters:
+      os:
+        type: string
+        default: "linux"
+      gotestsum:
+        type: string
+        default: "gotestsum"
+      cache_version:
+        type: string
+        default: "v3"
+    steps:
+      - checkout
+      - check-changed-files-or-halt
+      - when:
+          condition:
+            equal: [ linux, << parameters.os >> ]
+          steps:
+            - restore_cache:
+                key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }}
+            - attach_workspace:
+                at: '/go'
+      - when:
+          condition:
+            equal: [ darwin, << parameters.os >> ]
+          steps:
+            - restore_cache:
+                key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }}
+            - run: 'sh ./scripts/installgo_mac.sh'
+      - when:
+          condition:
+            equal: [ windows, << parameters.os >> ]
+          steps:
+            - run: rm -rf /c/Go
+            - restore_cache:
+                key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }}
+            - run: 'sh ./scripts/installgo_windows.sh'
+      - run: mkdir -p test-results
+      - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >>
+      - run: |
+          PACKAGE_NAMES=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname)
+          ./<< parameters.gotestsum >> --junitfile test-results/gotestsum-report.xml -- -short $PACKAGE_NAMES
+      - store_test_results:
+          path: test-results
+      - when:
+          condition:
+            equal: [ linux, << parameters.os >> ]
+          steps:
+            - save_cache:
+                name: 'Saving cache'
+                key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }}
+                paths:
+                  - '~/go/src/github.com/influxdata/telegraf/gotestsum'
+      - when:
+          condition:
+            equal: [ darwin, << parameters.os >> ]
+          steps:
+            - save_cache:
+                name: 'Saving cache'
+                key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }}
+                paths:
+                  - '/go/src/github.com/influxdata/telegraf/gotestsum'
+                  - '/usr/local/Cellar/go'
+                  - '/usr/local/bin/go'
+                  - '/usr/local/bin/gofmt'
+      - when:
+          condition:
+            equal: [ windows, << parameters.os >> ]
+          steps:
+            - save_cache:
+                name: 'Saving cache'
+                key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }}
+                paths:
+                  - 'C:\Go'
+                  - 'C:\Users\circleci\project\gotestsum.exe'
+
+  package-build:
+    parameters:
+      release:
+        type: boolean
+        default: false
+      type:
+        type: string
+        default: ""
+      nightly:
+        type: boolean
+        default: false
+    steps:
+      - checkout
+      - check-changed-files-or-halt
+      - attach_workspace:
+          at: '/go'
+      - when:
+          condition: << parameters.release >>
+          steps:
+            - run: 'make package'
+      - when:
+          condition: << parameters.nightly >>
+          steps:
+            - run: 'NIGHTLY=1 make package include_packages="$(make << parameters.type >>)"'
+      - unless:
+          condition:
+            or:
+              - << parameters.nightly >>
+              - << parameters.release >>
+          steps:
+            - run: 'make package include_packages="$(make << parameters.type >>)"'
+      - store_artifacts:
+          path: './build/dist'
+          destination: 'build/dist'
+      - persist_to_workspace:
+          root: './build'
+          paths:
+            - 'dist'
 
 jobs:
   deps:
-    <<: [ *defaults, *go-1_15 ]
+    executor: go-1_17
     steps:
       - checkout
       - restore_cache:
           key: go-mod-v1-{{ checksum "go.sum" }}
+      - check-changed-files-or-halt
       - run: 'make deps'
       - run: 'make tidy'
+      - run: 'make check'
+      - run: 'make check-deps'
       - save_cache:
           name: 'go module cache'
           key: go-mod-v1-{{ checksum "go.sum" }}
@@ -37,188 +174,482 @@ jobs:
         root: '/go'
         paths:
           - '*'
-  macdeps:
-    <<: [ *mac ]
+  test-go-1_17:
+    executor: go-1_17
     steps:
-      - checkout
-      - restore_cache:
-          key: mac-go-mod-v1-{{ checksum "go.sum" }}
-      - run: 'brew install go' # latest
-      - run: 'make deps'
-      - run: 'make tidy'
-      - save_cache:
-          name: 'go module cache'
-          key: mac-go-mod-v1-{{ checksum "go.sum" }}
-          paths:
-            - '~/go/pkg/mod'
-            - '/usr/local/Cellar/go'
-            - '/usr/local/bin/go'
-            - '/usr/local/bin/gofmt'
-      - persist_to_workspace:
-          root: '/'
-          paths:
-            - 'usr/local/bin/go'
-            - 'usr/local/Cellar/go'
-            - 'usr/local/bin/gofmt'
-            - 'Users/distiller/go'
+      - test-go
+    parallelism: 4
+  test-go-1_17-386:
+    executor: go-1_17
+    steps:
+      - test-go
+    parallelism: 4
+  test-go-mac:
+    executor: mac
+    steps:
+      - test-go:
+          os: darwin
+    parallelism: 4
+  test-go-windows:
+    executor:
+      name: win/default
+      shell: bash.exe
+    steps:
+      - test-go:
+          os: windows
+          gotestsum: "gotestsum.exe"
+    parallelism: 4
 
-  test-go-1.14:
-    <<: [ *defaults, *go-1_14 ]
+  windows-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
     steps:
-      - attach_workspace:
-          at: '/go'
-      - run: 'make'
-      - run: 'make test'
-  test-go-1.14-386:
-    <<: [ *defaults, *go-1_14 ]
+      - package-build:
+          type: windows
+          nightly: << parameters.nightly >>
+  darwin-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
     steps:
-      - attach_workspace:
-          at: '/go'
-      - run: 'GOARCH=386 make'
-      - run: 'GOARCH=386 make test'
-  test-go-1.15:
-    <<: [ *defaults, *go-1_15 ]
+      - package-build:
+          type: darwin
+          nightly: << parameters.nightly >>
+  i386-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
     steps:
-      - attach_workspace:
-          at: '/go'
-      - run: 'make'
-      - run: 'make check'
-      - run: 'make check-deps'
-      - run: 'make test'
-  test-go-1.15-386:
-    <<: [ *defaults, *go-1_15 ]
+      - package-build:
+          type: i386
+          nightly: << parameters.nightly >>
+  ppc64le-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
     steps:
-      - attach_workspace:
-          at: '/go'
-      - run: 'GOARCH=386 make'
-      - run: 'GOARCH=386 make check'
-      - run: 'GOARCH=386 make test'
-  test-go-darwin:
-    <<: [ *mac ]
+      - package-build:
+          type: ppc64le
+          nightly: << parameters.nightly >>
+  s390x-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
     steps:
-      - attach_workspace:
-          at: '/'
-      - run: 'make'
-      - run: 'make check'
-      - run: 'make test'
+      - package-build:
+          type: s390x
+          nightly: << parameters.nightly >>
+  armel-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: armel
+          nightly: << parameters.nightly >>
+  amd64-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: amd64
+          nightly: << parameters.nightly >>
+  arm64-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: arm64
+          nightly: << parameters.nightly >>
+  mipsel-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: mipsel
+          nightly: << parameters.nightly >>
+  mips-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: mips
+          nightly: << parameters.nightly >>
+  static-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: static
+          nightly: << parameters.nightly >>
+  armhf-package:
+    parameters:
+      nightly:
+        type: boolean
+        default: false
+    executor: go-1_17
+    steps:
+      - package-build:
+          type: armhf
+          nightly: << parameters.nightly >>
 
-  package:
-    <<: [ *defaults, *go-1_15 ]
+  release:
+    executor: go-1_17
+    steps:
+      - package-build:
+          release: true
+  nightly:
+    executor: go-1_17
     steps:
       - attach_workspace:
-          at: '/go'
-      - run: 'make package'
+          at: '/build'
+      - run:
+          command: |
+            aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/nightlies/ \
+              --exclude "*" \
+              --include "*.tar.gz" \
+              --include "*.deb" \
+              --include "*.rpm" \
+              --include "*.zip" \
+              --acl public-read
+  package-consolidate:
+    executor:
+      name: win/default
+      shell: powershell.exe
+    steps:
+      - attach_workspace:
+          at: '/build'
       - store_artifacts:
           path: './build/dist'
           destination: 'build/dist'
-
-  release:
-    <<: [ *defaults, *go-1_15 ]
+  package-sign-windows:
+    executor:
+      name: win/default
+      shell: powershell.exe
     steps:
+      - checkout
+      - check-changed-files-or-halt
       - attach_workspace:
-          at: '/go'
-      - run: 'make package'
+          at: '/build'
+      - run:
+          name: "Sign Windows Executables"
+          shell: powershell.exe
+          command: |
+            ./scripts/windows-signing.ps1
+      - persist_to_workspace:
+          root: './build'
+          paths:
+            - 'dist'
       - store_artifacts:
           path: './build/dist'
           destination: 'build/dist'
-  nightly:
-    <<: [ *defaults, *go-1_15 ]
+  package-sign-mac:
+    macos:
+      xcode: "11.3"
+    working_directory: /Users/distiller/project
+    environment:
+      FL_OUTPUT_DIR: output
+      FASTLANE_LANE: test
+    shell: /bin/bash --login -o pipefail
     steps:
+      - checkout
+      - check-changed-files-or-halt
       - attach_workspace:
-          at: '/go'
-      - run: 'NIGHTLY=1 make package'
-      - run: 'make upload-nightly'
+          at: '.'
+      - run:
+          command: |
+            sh ./scripts/mac-signing.sh
       - store_artifacts:
-          path: './build/dist'
+          path: './dist'
          destination: 'build/dist'
+  test-awaiter:
+    executor: go-1_17
+    steps:
+      - run:
+          command: |
+            echo "Go tests complete."
+  share-artifacts:
+    executor: aws-cli/default
+    steps:
+      - checkout
+      - check-changed-files-or-halt
+      - run:
+          command: |
+            PR=${CIRCLE_PULL_REQUEST##*/}
+            printf -v payload '{ "pullRequestNumber": "%s" }' "$PR"
+            curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload"
+  generate-config:
+    executor: go-1_17
+    steps:
+      - generate-config
+  generate-config-win:
+    executor:
+      name: win/default
+      shell: bash.exe
+    steps:
+      - generate-config:
+          os: windows
+  update-config:
+    executor: go-1_17
+    steps:
+      - checkout
+      - attach_workspace:
+          at: '/new-config'
+      - run: ./scripts/update_config.sh ${UPDATE_CONFIG_TOKEN}
+
+commonjobs:
+  - &test-awaiter
+    'test-awaiter':
+      requires:
+        - 'test-go-1_17'
+        - 'test-go-1_17-386'
 
 workflows:
   version: 2
   check:
     jobs:
-      - 'macdeps':
-          filters:
-            tags:
-              only: /.*/
       - 'deps':
           filters:
            tags:
              only: /.*/
-      - 'test-go-1.14':
+      - 'test-go-1_17':
          requires:
            - 'deps'
          filters:
            tags:
              only: /.*/
-      - 'test-go-1.14-386':
+      - 'test-go-1_17-386':
          requires:
            - 'deps'
          filters:
            tags:
              only: /.*/
-      - 'test-go-1.15':
-          requires:
-            - 'deps'
+      - 'test-go-mac':
          filters:
-            tags:
+            tags: # only runs on tags if you specify this filter
              only: /.*/
-      - 'test-go-1.15-386':
-          requires:
-            - 'deps'
+      - 'test-go-windows':
          filters:
            tags:
              only: /.*/
-      - 'test-go-darwin':
+      - *test-awaiter
+      - 'windows-package':
+          requires:
+            - 'test-go-windows'
+      - 'darwin-package':
+          requires:
+            - 'test-go-mac'
+      - 'i386-package':
+          requires:
+            - 'test-awaiter'
+      - 'ppc64le-package':
+          requires:
+            - 'test-awaiter'
+      - 's390x-package':
+          requires:
+            - 'test-awaiter'
+      - 'armel-package':
+          requires:
+            - 'test-awaiter'
+      - 'amd64-package':
+          requires:
+            - 'test-awaiter'
+      - 'arm64-package':
+          requires:
+            - 'test-awaiter'
+      - 'armhf-package':
+          requires:
+            - 'test-awaiter'
+      - 'static-package':
+          requires:
+            - 'test-awaiter'
+      - 'mipsel-package':
+          requires:
+            - 'test-awaiter'
+      - 'mips-package':
+          requires:
+            - 'test-awaiter'
+      - 'generate-config':
          requires:
-            - 'macdeps'
+            - 'amd64-package'
          filters:
-            tags: # only runs on tags if you specify this filter
-              only: /.*/
-      - 'package':
+            branches:
+              only:
+                - master
+      - 'generate-config-win':
          requires:
-            - 'test-go-darwin'
-            - 'test-go-1.14'
-            - 'test-go-1.14-386'
-            - 'test-go-1.15'
-            - 'test-go-1.15-386'
+            - 'windows-package'
+          filters:
+            branches:
+              only:
+                - master
+      - 'update-config':
+          requires:
+            - 'generate-config-win'
+            - 'generate-config'
+          filters:
+            branches:
+              only:
+                - master
+      - 'share-artifacts':
+          requires:
+            - 'i386-package'
+            - 'ppc64le-package'
+            - 's390x-package'
+            - 'armel-package'
+            - 'amd64-package'
+            - 'mipsel-package'
+            - 'mips-package'
+            - 'darwin-package'
+            - 'windows-package'
+            - 'static-package'
+            - 'arm64-package'
+            - 'armhf-package'
+          filters:
+            branches:
+              ignore:
+                - master
+                - release.*
+            tags:
+              ignore: /.*/
      - 'release':
          requires:
-            - 'test-go-darwin'
-            - 'test-go-1.14'
-            - 'test-go-1.14-386'
-            - 'test-go-1.15'
-            - 'test-go-1.15-386'
+            - 'test-go-windows'
+            - 'test-go-mac'
+            - 'test-go-1_17'
+            - 'test-go-1_17-386'
          filters:
            tags:
              only: /.*/
            branches:
              ignore: /.*/
+      - 'package-sign-windows':
+          requires:
+            - 'release'
+          filters:
+            tags:
+              only: /.*/
+      - 'package-sign-mac':
+          requires:
+            - 'package-sign-windows'
+          filters:
+            tags:
+              only: /.*/
+
  nightly:
    jobs:
      - 'deps'
-      - 'macdeps'
-      - 'test-go-1.14':
+      - 'test-go-1_17':
          requires:
            - 'deps'
-      - 'test-go-1.14-386':
+      - 'test-go-1_17-386':
          requires:
            - 'deps'
-      - 'test-go-1.15':
+      - 'test-go-mac'
+      - 'test-go-windows'
+      - *test-awaiter
+      - 'windows-package':
+          name: 'windows-package-nightly'
+          nightly: true
          requires:
-            - 'deps'
-      - 'test-go-1.15-386':
+            - 'test-go-windows'
+      - 'darwin-package':
+          name: 'darwin-package-nightly'
+          nightly: true
          requires:
-            - 'deps'
-      - 'test-go-darwin':
+            - 'test-go-mac'
+      - 'i386-package':
+          name: 'i386-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'ppc64le-package':
+          name: 'ppc64le-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 's390x-package':
+          name: 's390x-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'armel-package':
+          name: 'armel-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'amd64-package':
+          name: 'amd64-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'arm64-package':
+          name: 'arm64-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'armhf-package':
+          name: 'armhf-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'static-package':
+          name: 'static-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'mipsel-package':
+          name: 'mipsel-package-nightly'
+          nightly: true
+          requires:
+            - 'test-awaiter'
+      - 'mips-package':
+          name: 'mips-package-nightly'
+          nightly: true
          requires:
-            - 'macdeps'
-      - 'nightly':
+            - 'test-awaiter'
+      - nightly:
          requires:
-            - 'test-go-darwin'
-            - 'test-go-1.14'
-            - 'test-go-1.14-386'
-            - 'test-go-1.15'
-            - 'test-go-1.15-386'
+            - 'i386-package-nightly'
+            - 'ppc64le-package-nightly'
+            - 's390x-package-nightly'
+            - 'armel-package-nightly'
+            - 'amd64-package-nightly'
+            - 'mipsel-package-nightly'
+            - 'mips-package-nightly'
+            - 'darwin-package-nightly'
+            - 'windows-package-nightly'
+            - 'static-package-nightly'
+            - 'arm64-package-nightly'
+            - 'armhf-package-nightly'
    triggers:
      - schedule:
          cron: "0 7 * * *"
diff --git a/.gitattributes b/.gitattributes
index 21bc439bf797e..7769daa83cb06 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -3,3 +3,4 @@ README.md merge=union
 go.sum merge=union
 plugins/inputs/all/all.go merge=union
 plugins/outputs/all/all.go merge=union
+**/testdata/** test eol=lf
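[Reviewer note — not part of the patch] The new `.gitattributes` rule pins LF line endings for everything under a `testdata/` directory, so golden files compare byte-for-byte on Windows checkouts too. One way to confirm the rule matches a fixture (hypothetical path):

```sh
# git check-attr prints the attributes the new rule assigns to a path.
git check-attr test eol -- plugins/inputs/example/testdata/expected.out
# expected output:
#   plugins/inputs/example/testdata/expected.out: test: set
#   plugins/inputs/example/testdata/expected.out: eol: lf
```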
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
new file mode 100644
index 0000000000000..a9b657f105056
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -0,0 +1,66 @@
+name: Bug Report
+description: Create a bug report to help us improve
+labels: ["bug"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this bug report! We reserve Telegraf issues for reproducible bugs.
+        Please redirect any questions about Telegraf usage to our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/); we have a lot of talented community members there who could help answer your question more quickly.
+  - type: textarea
+    id: config
+    attributes:
+      label: Relevant telegraf.conf
+      description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks.
+      render: toml
+    validations:
+      required: true
+  - type: input
+    id: system-info
+    attributes:
+      label: System info
+      description: Include Telegraf version, operating system, and other relevant details
+      placeholder: ex. Telegraf 1.20.0, Ubuntu 20.04, Docker 20.10.8
+    validations:
+      required: true
+  - type: textarea
+    id: docker
+    attributes:
+      label: Docker
+      description: If your bug involves third-party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against.
+    validations:
+      required: false
+  - type: textarea
+    id: reproduce
+    attributes:
+      label: Steps to reproduce
+      description: Describe the steps to reproduce the bug.
+      value: |
+        1.
+        2.
+        3.
+        ...
+    validations:
+      required: true
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: Expected behavior
+      description: Describe what you expected to happen when you performed the above steps.
+    validations:
+      required: true
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: Actual behavior
+      description: Describe what actually happened when you performed the above steps.
+    validations:
+      required: true
+  - type: textarea
+    id: additional-info
+    attributes:
+      label: Additional info
+      description: Include gist of relevant config, logs, etc.
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md
deleted file mode 100644
index 28c6237ac75d1..0000000000000
--- a/.github/ISSUE_TEMPLATE/Bug_report.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-name: Bug report
-labels: bug
-about: Create a report to help us improve
-
----
-
-### Relevant telegraf.conf:
-
-```toml
-
-```
-
-### System info:
-
-
-
-### Docker
-
-
-
-### Steps to reproduce:
-
-
-
-1. ...
-2. ...
-
-### Expected behavior:
-
-
-
-### Actual behavior:
-
-
-
-### Additional info:
-
-
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 527555bdfc7a8..67b65a26247fb 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,5 +1,26 @@
 ### Required for all PRs:
 
-- [ ] Signed [CLA](https://influxdata.com/community/cla/).
-- [ ] Associated README.md updated.
-- [ ] Has appropriate unit tests.
+
+- [ ] Updated associated README.md.
+- [ ] Wrote appropriate unit tests.
+- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary)
+
+
+
+resolves #
+
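[Reviewer note — not part of the patch] The reworked checklist asks that PR titles or commits follow the conventional commit format. Hypothetical examples of messages that would satisfy it:

```sh
# type(scope): description — the scope is typically the plugin being touched
git commit -m "feat(inputs.mqtt_consumer): add optional topic parsing"
git commit -m "fix(outputs.influxdb): retry writes on temporary network errors"
```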
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000000..2068f1f06444d
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    ignore:
+      # Dependabot isn't able to update packages that do not match the source, i.e. anything with a version in the name
+      - dependency-name: "*.v*"
+    commit-message:
+      prefix: "fix:"
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000000000..d4eac0d328059
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,35 @@
+name: golangci-lint
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+  schedule:
+    # Trigger every day at 16:00 UTC
+    - cron: '0 16 * * *'
+jobs:
+  golangci-pr:
+    if: github.ref != 'refs/heads/master'
+    name: lint-pr-changes
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: v1.42.1
+          only-new-issues: true
+  golangci-master:
+    if: github.ref == 'refs/heads/master'
+    name: lint-master-all
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: v1.42.1
+          only-new-issues: true
+          args: --issues-exit-code=0
diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
new file mode 100644
index 0000000000000..104d71db2230a
--- /dev/null
+++ b/.github/workflows/linter.yml
@@ -0,0 +1,58 @@
+---
+#################################
+#################################
+## Super Linter GitHub Actions ##
+#################################
+#################################
+name: Lint Code Base
+
+#
+# Documentation:
+# https://help.github.com/en/articles/workflow-syntax-for-github-actions
+#
+
+#############################
+# Start the job on all push #
+#############################
+on:
+  push:
+    branches-ignore: [master, main]
+    # Remove the line above to run when pushing to master
+  pull_request:
+    branches: [master, main]
+
+###############
+# Set the Job #
+###############
+jobs:
+  build:
+    # Name the Job
+    name: Lint Code Base
+    # Set the agent to run on
+    runs-on: ubuntu-latest
+
+    ##################
+    # Load all steps #
+    ##################
+    steps:
+      ##########################
+      # Checkout the code base #
+      ##########################
+      - name: Checkout Code
+        uses: actions/checkout@v2
+        with:
+          # Full git history is needed to get a proper list of changed files within `super-linter`
+          fetch-depth: 0
+
+      ################################
+      # Run Linter against code base #
+      ################################
+      - name: Lint Code Base
+        uses: github/super-linter@v4.8.1
+        env:
+          VALIDATE_ALL_CODEBASE: false
+          DEFAULT_BRANCH: master
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LINTER_RULES_PATH: '.'
+          MARKDOWN_CONFIG_FILE: .markdownlint.yml
+          VALIDATE_MARKDOWN: true
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000000000..8d7b97187bd37
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,25 @@
+name: test
+
+on: [pull_request]
+
+jobs:
+  test:
+    runs-on: ubuntu-20.04
+    steps:
+      - name: checkout source
+        uses: actions/checkout@v2
+
+      - name: build binary
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y wget build-essential git
+          sudo apt-get upgrade -y
+          sudo wget https://golang.org/dl/go1.17.5.linux-amd64.tar.gz
+          sudo tar -C /usr/local -xzf go1.17.5.linux-amd64.tar.gz
+          export PATH="/usr/local/go/bin:$PATH"
+          go mod tidy
+          LDFLAGS='-s -w' make telegraf
+          if [ ! -f telegraf ]; then
+            echo "Failed to build binary"
+            exit 1
+          fi
diff --git a/.gitignore b/.gitignore
index 9ef2123ad56be..f15f70b1e5671 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,8 @@
 /telegraf.gz
 /vendor
 .DS_Store
-result*
\ No newline at end of file
+result*
+process.yml
+/.vscode
+/*.toml
+/*.conf
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000000000..a4d14ddd80362
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,130 @@
+linters:
+  disable-all: true
+  enable:
+#    - telegraflinter
+    - bodyclose
+    - dogsled
+    - errcheck
+    - goprintffuncname
+    - gosimple
+    - govet
+    - ineffassign
+    - nakedret
+    - nilerr
+    - predeclared
+    - revive
+    - sqlclosecheck
+    - staticcheck
+    - typecheck
+    - unconvert
+    - unused
+    - varcheck
+
+linters-settings:
+  revive:
+    rules:
+      - name: argument-limit
+        arguments: [ 6 ]
+      - name: atomic
+      - name: bare-return
+      - name: blank-imports
+      - name: bool-literal-in-expr
+      - name: call-to-gc
+      - name: confusing-naming
+      - name: confusing-results
+      - name: constant-logical-expr
+      - name: context-as-argument
+      - name: context-keys-type
+      - name: deep-exit
+      - name: defer
+      - name: dot-imports
+      - name: duplicated-imports
+      - name: early-return
+      - name: empty-block
+      - name: empty-lines
+      - name: error-naming
+      - name: error-return
+      - name: error-strings
+      - name: errorf
+#      - name: flag-parameter # disable for now
+      - name: function-result-limit
+        arguments: [ 3 ]
+      - name: identical-branches
+      - name: if-return
+      - name: imports-blacklist
+        arguments: [ "log" ]
+      - name: import-shadowing
+      - name: increment-decrement
+      - name: indent-error-flow
+      - name: modifies-parameter
+      - name: modifies-value-receiver
+      - name: package-comments
+      - name: range
+      - name: range-val-address
+      - name: range-val-in-closure
+      - name: receiver-naming
+      - name: redefines-builtin-id
+      - name: string-of-int
+      - name: struct-tag
+      - name: superfluous-else
+      - name: time-naming
+      - name: unconditional-recursion
+      - name: unexported-naming
+      - name: unhandled-error
+        arguments: ["fmt.Printf", "fmt.Println"]
+      - name: unnecessary-stmt
+      - name: unreachable-code
+#      - name: unused-parameter
+      - name: var-declaration
+      - name: var-naming
+      - name: waitgroup-by-value
+  nakedret:
+    # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
+    max-func-lines: 1
+
+run:
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 10m
+
+  # which dirs to skip: issues from them won't be reported;
+  # can use regexp here: generated.*, regexp is applied on full path;
+  # default value is empty list, but default dirs are skipped independently
+  # from this option's value (see skip-dirs-use-default).
+  # "/" will be replaced by current OS file path separator to properly work
+  # on Windows.
+  skip-dirs:
+    - assets
+    - docs
+    - etc
+    - scripts
+#    - plugins/parsers/influx/machine.go
+
+  # which files to skip: they will be analyzed, but issues from them
+  # won't be reported. Default value is empty list, but there is
+  # no need to include all autogenerated files, we confidently recognize
+  # autogenerated files. If it's not please let us know.
+  # "/" will be replaced by current OS file path separator to properly work
+  # on Windows.
+  skip-files:
+    - plugins/parsers/influx/machine.go*
+
+issues:
+  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+  max-issues-per-linter: 0
+
+  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+  max-same-issues: 0
+
+  exclude:
+    - don't use an underscore in package name #revive:var-naming
+
+  exclude-rules:
+    - path: plugins/parsers/influx
+      linters:
+        - govet
+
+    - path: _test\.go
+      text: "parameter.*seems to be a control flag, avoid control coupling"
+
+output:
+  format: tab
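[Reviewer note — not part of the patch] One easily misread detail in the `.golangci.yml` above: the `arguments` of revive's `unhandled-error` rule list the functions whose dropped errors are tolerated, not the functions being checked — so an unchecked `fmt.Println` passes while other ignored error returns are flagged. To reproduce the CI lint pass locally (pinned to the same version as `.github/workflows/golangci-lint.yml`; the config is picked up automatically from the repo root):

```sh
# Install the pinned golangci-lint release, then lint the whole tree.
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \
  | sh -s -- -b "$(go env GOPATH)/bin" v1.42.1
golangci-lint run ./...
```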
+ # "/" will be replaced by current OS file path separator to properly work + # on Windows. + skip-dirs: + - assets + - docs + - etc + - scripts + # - plugins/parsers/influx/machine.go + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + # "/" will be replaced by current OS file path separator to properly work + # on Windows. + skip-files: + - plugins/parsers/influx/machine.go* + +issues: + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 + + exclude: + - don't use an underscore in package name #revive:var-naming + + exclude-rules: + - path: plugins/parsers/influx + linters: + - govet + + - path: _test\.go + text: "parameter.*seems to be a control flag, avoid control coupling" + +output: + format: tab diff --git a/.markdownlint.yml b/.markdownlint.yml new file mode 100644 index 0000000000000..1344b312f825e --- /dev/null +++ b/.markdownlint.yml @@ -0,0 +1,3 @@ +{ + "MD013": false +} diff --git a/CHANGELOG.md b/CHANGELOG.md index a5b3dfc4e85cf..ed54e1ff44f17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,18 +1,699 @@ -## v1.16.0 [unreleased] +## v1.20.4 [2021-11-17] + +#### Release Notes + + - [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 + - [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation + +Thank you to @zak-pawel for lots of linter fixes! + + - [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* + - [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* + - [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* + - [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +#### Bugfixes + + - [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 + - [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI + - [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 + - [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up + - [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int + - [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd + - [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library + - [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" + - [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly + - [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 + - [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver + - [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update 
+## v1.20.3 [2021-10-27]
+
+#### Release Notes
+
+ - [#9873](https://github.com/influxdata/telegraf/pull/9873) Update go to 1.17.2
+
+#### Bugfixes
+
+ - [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3
+ - [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs
+ - [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps
+ - [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2
+ - [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation
+ - [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests
+ - [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library
+ - [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys
+ - [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD
+ - [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory
+ - [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin
+ - [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field
+ - [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kubernetes labels
+ - [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size
+ - [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook
+ - [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x replicaset
+ - [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0
+ - [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0
+ - [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0
+ - [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql` Fix inconsistent metric types in mysql
+ - [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible
+ - [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place
+ - [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1
+ - [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2
+ - [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage
+ - [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0
+
+#### New External Plugins
+
+ - [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka
+ - [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka
+
+## v1.20.2 [2021-10-07]
+
+#### Bugfixes
+
+ - [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API
+ - [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields
+ - [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser
+ - [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2
+ - [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built
+
+## v1.20.1 [2021-10-06]
+
+#### Bugfixes
+
+ - [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2
+ - [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7
+ - [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0
+ - [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference
+ - [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging
+ - [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config
+ - [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags
+ - [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version
+ - [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing
+ - [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation
+ - [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client
+ - [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1
+ - [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module
+ - [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8
+ - [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0
+ - [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0
+
+#### Features
+
+ - [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field
+
+## v1.20.0 [2021-09-17]
+
+#### Release Notes
+
+ - [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17
+
+#### Bugfixes
+
+ - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5
+ - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests
+ - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465
+ - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4
+ - [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives
+ - [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds
+ - [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0
+ - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version
+ - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value
+ - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values
+ - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6
+ - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query
+ - [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8
+ - [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats
+ - [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43
+ - [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6
+ - [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names
+ - [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13
+ - [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0
+ - [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error
+ - [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging
+ - [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7
+ - [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module
+ - [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak
+ - [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting
+
+#### Features
+
+ - [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` Add pattern support
+ - [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype
+ - [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces
+ - [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider
+ - [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP
+ - [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children
+ - [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type
+ - [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog
+ - [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add landing page
+ - [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allow multiple paths and add path_tag
+ - [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins
+ - [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support
+ - [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name
+ - [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser
+ - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance
+ - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url
+ - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status
+ - [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting)
+ - [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0
+
+#### New Input Plugins
+
+ - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs
+ - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection
+ - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input
+ - [#9623](https://github.com/influxdata/telegraf/pull/9623) Add Internet Speed Monitor input plugin
+
+#### New Output Plugins
+
+ - [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output
+ - [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer (ADX) output
+
+
+## v1.19.3 [2021-08-18]
+
+#### Bugfixes
+
+ - [#9639](https://github.com/influxdata/telegraf/pull/9639) Update sirupsen/logrus module from 1.7.0 to 1.8.1
+ - [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1
+ - [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4
+ - [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0
+ - [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery
+ - [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation
+ - [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores
+ - [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling of empty result sets
+ - [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection
+ - [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api
+ - [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki
+ - [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1
+ - [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error
+ - [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0
+ - [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path
+ - [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1
+ - [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0
+ - [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered
+
+## v1.19.2 [2021-07-28]
+
+#### Release Notes
+
+ - [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6
+
+#### Bugfixes
+
+ - [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions
+ - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written
+ - [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims
+ - [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting
+ - [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column
+ - [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]*
+ - [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics
+ - [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name
+ - [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics
+ - [#9323](https://github.com/influxdata/telegraf/pull/9323) `inputs.x509_cert` Prevent x509_cert from hanging on UDP connection
+ - [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled
+ - [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure
+ - [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map
+ - [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication
+ - [#9520](https://github.com/influxdata/telegraf/pull/9520) `parsers.json_v2` Add support for large uint64 and int64 numbers
+ - [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles
+ - [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log
+ - [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0
+ - [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups
+ - [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly
+
+#### Features
+
+ - [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified
+
+## v1.19.1 [2021-07-07]
+
+#### Bugfixes
+
+ - [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified
+ - [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory
+ - [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic
+ - [#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic
+ - [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic
+ - [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression
+ - [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nats-server module to v2.2.6
+ - [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error
+ - [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error
+ - [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinery module to v0.21.1
+ - [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3
+ - [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase module to v0.1.0
+ - [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics
+ - [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify
+ - [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https
+ - [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34
+ - [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys
+ - [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support
+ - [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0
+ - [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]*
+
+## v1.19.0 [2021-06-17]
+
+#### Release Notes
+
+- Many linter fixes - thanks @zak-pawel and all!
+- [#9331](https://github.com/influxdata/telegraf/pull/9331) Update Go to 1.16.5
+
+#### Bugfixes
+
+- [#9182](https://github.com/influxdata/telegraf/pull/9182) Update pgx to v4
+- [#9275](https://github.com/influxdata/telegraf/pull/9275) Fix reading config files starting with http:
+- [#9196](https://github.com/influxdata/telegraf/pull/9196) `serializers.prometheusremotewrite` Update dependency and remove tags with empty values
+- [#9051](https://github.com/influxdata/telegraf/pull/9051) `outputs.kafka` Don't prevent telegraf from starting when there's a connection error
+- [#8795](https://github.com/influxdata/telegraf/pull/8795) `parsers.prometheusremotewrite` Update prometheus dependency to v2.21.0
+- [#9295](https://github.com/influxdata/telegraf/pull/9295) `outputs.dynatrace` Use dynatrace-metric-utils
+- [#9368](https://github.com/influxdata/telegraf/pull/9368) `parsers.json_v2` Update json_v2 parser to handle null types
+- [#9359](https://github.com/influxdata/telegraf/pull/9359) `inputs.sql` Fix import of sqlite and ignore it on all platforms that require CGO
+- [#9329](https://github.com/influxdata/telegraf/pull/9329) `inputs.kube_inventory` Fix connecting to the wrong url
+- [#9358](https://github.com/influxdata/telegraf/pull/9358) Upgrade denisenkom/go-mssqldb to v0.10.0
+- [#9283](https://github.com/influxdata/telegraf/pull/9283) `processors.parser` Fix segfault
+- [#9243](https://github.com/influxdata/telegraf/pull/9243) `inputs.docker` Close all idle connections
+- [#9338](https://github.com/influxdata/telegraf/pull/9338) `inputs.suricata` Support new JSON format
+- [#9296](https://github.com/influxdata/telegraf/pull/9296) `outputs.influxdb` Fix endless retries
+
+#### Features
+
+- [#8987](https://github.com/influxdata/telegraf/pull/8987) Config file environment variable can be a URL
+- [#9297](https://github.com/influxdata/telegraf/pull/9297) `outputs.datadog` Add HTTP proxy to datadog output
+- [#9087](https://github.com/influxdata/telegraf/pull/9087) Add named timestamp formats
+- [#9276](https://github.com/influxdata/telegraf/pull/9276) `inputs.vsphere` Add config option for the historical interval duration
+- [#9274](https://github.com/influxdata/telegraf/pull/9274) `inputs.ping` Add an option to specify packet size
+- [#9007](https://github.com/influxdata/telegraf/pull/9007) Allow multiple "--config" and "--config-directory" flags
+- [#9249](https://github.com/influxdata/telegraf/pull/9249) `outputs.graphite` Allow more characters in graphite tags
+- [#8351](https://github.com/influxdata/telegraf/pull/8351) `inputs.sqlserver` Added login_name
+- [#9223](https://github.com/influxdata/telegraf/pull/9223) `inputs.dovecot` Add support for unix domain sockets
+- [#9118](https://github.com/influxdata/telegraf/pull/9118) `processors.strings` Add UTF-8 sanitizer
+- [#9156](https://github.com/influxdata/telegraf/pull/9156) `inputs.aliyuncms` Add config option list of regions to query
+- [#9138](https://github.com/influxdata/telegraf/pull/9138) `common.http` Add OAuth2 to HTTP input
+- [#8822](https://github.com/influxdata/telegraf/pull/8822) `inputs.sqlserver` Enable Azure Active Directory (AAD) authentication support
+- [#9136](https://github.com/influxdata/telegraf/pull/9136) `inputs.cloudwatch` Add wildcard support in dimensions configuration
+- [#5517](https://github.com/influxdata/telegraf/pull/5517) `inputs.mysql` Gather all mysql channels
+- [#8911](https://github.com/influxdata/telegraf/pull/8911) `processors.enum` Support float64
+- [#9105](https://github.com/influxdata/telegraf/pull/9105) `processors.starlark` Support nanosecond resolution timestamp
+- [#9080](https://github.com/influxdata/telegraf/pull/9080) `inputs.logstash` Add support for version 7 queue stats
+- [#9074](https://github.com/influxdata/telegraf/pull/9074) `parsers.prometheusremotewrite` Add starlark script for renaming metrics
+- [#9032](https://github.com/influxdata/telegraf/pull/9032) `inputs.couchbase` Add ~200 more Couchbase metrics via Buckets endpoint
+- [#8596](https://github.com/influxdata/telegraf/pull/8596) `inputs.sqlserver` Add service and save connection pools
+- [#9042](https://github.com/influxdata/telegraf/pull/9042) `processors.starlark` Add math module
+- [#6952](https://github.com/influxdata/telegraf/pull/6952) `inputs.x509_cert` Wildcard support for cert filenames
+- [#9004](https://github.com/influxdata/telegraf/pull/9004) `processors.starlark` Add time module
+- [#8891](https://github.com/influxdata/telegraf/pull/8891) `inputs.kinesis_consumer` Add content_encoding option with gzip and zlib support
+- [#8996](https://github.com/influxdata/telegraf/pull/8996) `processors.starlark` Add an example showing how to obtain IOPS from diskio input
+- [#8966](https://github.com/influxdata/telegraf/pull/8966) `inputs.http_listener_v2` Add support for snappy compression
+- [#8661](https://github.com/influxdata/telegraf/pull/8661) `inputs.cisco_telemetry_mdt` Add support for events and class based query
+- [#8861](https://github.com/influxdata/telegraf/pull/8861) `inputs.mongodb` Optionally collect top stats
+- [#8979](https://github.com/influxdata/telegraf/pull/8979) `parsers.value` Add custom field name config option
+- [#8544](https://github.com/influxdata/telegraf/pull/8544) `inputs.sqlserver` Add an optional health metric
+
+#### New Input Plugins
+
+- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov
+- [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble
+- [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak
+- [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda
+- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - contributed by @srebhan
+
+#### New Output Plugins
+
+- [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - contributed by @FZambia
+- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - contributed by @illuusio
+- [AWS Cloudwatch logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - contributed by @i-prudnikov
+
+#### New Parser Plugins
+
+- [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa
+- [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - contributed by @sspaink
+
+#### New External Plugins
+
+- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - contributed by @falon
+- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - contributed by @jcgonnard
+- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - contributed by @machinly
+- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - contributed by @SLedunois
+
[#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2
+ - [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17
+ - [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0
+ - [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go to 1.10.0
+ - [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1
+ - [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go
+
+#### Features
+
+ - [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression
+
+## v1.18.2 [2021-04-28]
+
+#### Bugfixes
+
+ - [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings
+ - [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo
+ - [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls
+ - [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write
+ - [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures
+ - [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner
+ - [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later
+ - [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name
+ - [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling
+
+## v1.18.1 [2021-04-07]
+
+#### Bugfixes
+
+ - [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8
+ - [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override
+ - [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat
+ - [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed
+ - [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode
+ - [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more efficiently
+ - [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id
+ - [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object
+ - [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation
+ - [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic
+ - [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats
+ - [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode
+ - [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloading
+
+## v1.18.0 [2021-03-17]
+
+#### Release Notes
+
+ - Support Go version 1.16.2
+ - Added support for code signing in Windows
+
+#### Bugfixes
+
+ - [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice
+ - [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling
+ - [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list
+ - [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin
+ - [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues
+ - [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count
+ - [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions
+ - [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin
+ - [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug
+ - [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types
+ - [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache
+ - [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser
+ - [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies
+ - [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready.
+ - [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true
+ - [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL
+ - [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set
+ - [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file
+
+#### Features
+
+ - [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin
+ - [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality
+ - [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy
+ - [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (e.g. namepass)
+ - [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest
+ - [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols
+ - [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` Added member_id as a tag (as it is a unique value) and the address of the server when the status is other than 200, for better debugging
+ - [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin
+ - [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distribution metrics
+ - [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url
+ - [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows
+ - [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script
+ - [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin
+ - [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input
+ - [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON
+ - [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods)
+ - [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients
+ - [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only
+
+#### New Inputs
+ - [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch
+ - [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog
+ - [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData
+ - [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey
+ - [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga
+
+#### New Outputs
+ - [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac
+ - [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/bigquery) - Contributed by @gkatzioura
+ - [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey
+ - [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb
+
+#### New Aggregators
+ - [Derivative Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative) - Contributed by @KarstenSchnitter
+ - [Quantile Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan
+
+#### New Processors
+ - [AWS EC2 Metadata Processor Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo
+
+#### New Parsers
+ - [XML Parser Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan
+
+#### New Serializers
+ - [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox
+
+#### New External Plugins
+ - [GeoIP Processor Plugin](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali
+ - [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat
+ - [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope
+
+## v1.17.3 [2021-02-17]
+
+#### Bugfixes
+
+ - [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files
+ - [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8
+ - [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13
+ - [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value
+ - [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue
+ - [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt
+ - [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors
+ - [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper
+ - [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config
+ - [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline
+ - [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux
+ - [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version
+
+
+## v1.17.2 [2021-01-28]
+
+#### Bugfixes
+
+ - [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native
+ - [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function
+
+
+## v1.17.1 [2021-01-27]
+
+#### Release Notes
+
+ Included a few more changes that add configuration options to plugins as it's been a while since the last release
+
+ - [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool
+ - [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows
+ - [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible
+ - [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser
+ - [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout
+ - [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames
+ - [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C
+ - [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input
+ - [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin
+ - [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection
+
+#### Bugfixes
+
+ - [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses
+ - [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors
+ - [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys.
+ - [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value
+ - [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed.
+ - [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin
+ - [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue
+ - [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8
+ - [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2
+ - [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1
+ - [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists.
+ - [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurement name, or if it's blank, use the 'name' field of the event's json.
+ - [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle.
+ - [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge
+ - [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses
+ - [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0
+ - [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start
+
+#### New External Plugins
+
+ - [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin
+
+
+## v1.17.0 [2020-12-18]
+
+#### Release Notes
+
+ - Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware; a minimal sketch follows this list.
+ - New input plugins: Riemann-Protobuf Listener, Intel PowerStat
+ - New output plugins: Yandex.Cloud monitoring, Logz.io
+ - New parser plugin: Prometheus
+ - New serializer: Prometheus remote write
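+
+ A minimal sketch of the state pattern (the per-name keying and `count` field here are illustrative, not taken from the plugin's shipped examples):
+
+ ```python
+ # 'state' is the global dict the Starlark processor preserves between runs
+ def apply(metric):
+     # count how many metrics with each name have been seen so far
+     count = state.get(metric.name, 0) + 1
+     state[metric.name] = count
+     metric.fields["count"] = count
+     return metric
+ ```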
+
+#### Bugfixes
+
+ - [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter
+ - [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields.
+ - [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests
+ - [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test
+ - [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1
+ - [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition
+ - [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write
+ - [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there
+ - [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data
+ - [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser
+ - [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers
+ - [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits
+ - [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory
+
+#### Features
+
+ - [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement
+ - [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin
+ - [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI
+ - [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries
+ - [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin.
+ - [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers
+ - [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries
+ - [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call
+ - [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB
+ - [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name]
+ - [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty
+ - [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin
+ - [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le
+ - [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek)
+ - [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data
+ - [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input
+ - [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin
+ - [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input
+ - [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values
+ - [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes
+ - [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin
+ - [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5
+ - [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin
+ - [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin
+ - [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input
+ - [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics
+ - [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor
+ - [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added
+ - [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support
+
+
+#### New Parser Plugins
+
+ - [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus
+
+#### New Serializer Plugins
+
+ - [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer
+
+#### New Input Plugins
+
+ - [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuf Listener
+ - [#8488](https://github.com/influxdata/telegraf/pull/8488) 
`inputs.intel_powerstat` New Intel PowerStat input plugin + +#### New Output Plugins + + - [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring + - [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin + + +## v1.16.3 [2020-12-01] + +#### Bugfixes + + - [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482 + - [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 + - [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype + - [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name + - [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" + - [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency + - [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor + - [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column + - [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output + - [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor + - [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark + - [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list + - [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging + - [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors + - [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function + + +## v1.16.2 [2020-11-13] + +#### Bugfixes + + - [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). 
+ - [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs + - [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) + - [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme + - [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes + - [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config + - [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests + - [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 + - [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 + - [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag + - [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding + - [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 + - [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test + - [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin + +## v1.16.1 [2020-10-28] + +#### Release Notes + + - [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + +#### Bugfixes + + - [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix + - [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters + - [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc + - [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 + - [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir + - [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error + - [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers + + +## v1.16.0 [2020-10-21] #### Release Notes - - Many documentation updates - New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) - - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to go 1.15 - - [#7864](https://github.com/influxdata/telegraf/pull/7864) `processors.starlark` Add logic starlark example - - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck - - [#7932](https://github.com/influxdata/telegraf/pull/7932) Support for AWS Cloudwatch Alarms #7931 - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 - - 
[#7980](https://github.com/influxdata/telegraf/pull/7980) `processors.starlark` add example input/outputs to starlark examples
 - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines to run external plugins with execd
 - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release
+ - [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code examples for the Starlark processor
 
 #### Features
 
@@ -37,6 +718,9 @@
 - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag
 - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167)
 - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands
+ - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15
+ - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code
+ - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting
 
 #### Bugfixes
 
@@ -44,6 +728,7 @@
 - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags
 - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed
 - [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors
+ - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements
 - [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored
 - [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF
 - [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds
@@ -64,6 +749,8 @@
 - [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression
 - [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2
 - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform
+ - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared
+ - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd
 
 #### New Input Plugins
 
@@ -79,6 +766,7 @@
 - [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue
 - [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo
+ - [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest
 
 #### New External Plugins
 
@@ -87,6 +775,14 @@
 - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
 - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos
 - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
+ - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
+
+## v1.15.4 [2020-10-20]
+
+#### Bugfixes
+
+ - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd
+ - [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processors using logging
 
 ## v1.15.3 [2020-09-11]
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 897ac1377e6e7..d5732dcbfa1d1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,24 +1,30 @@
 ### Contributing
 
 1. [Sign the CLA][cla].
-1. Open a [new issue][] to discuss the changes you would like to make.  This is
+2. Open a [new issue][] to discuss the changes you would like to make.  This is
    not strictly required but it may help reduce the amount of rework you need
    to do later.
-1. Make changes or write plugin using the guidelines in the following
+3. Make changes or write a plugin using the guidelines in the following
   documents:
   - [Input Plugins][inputs]
   - [Processor Plugins][processors]
   - [Aggregator Plugins][aggregators]
   - [Output Plugins][outputs]
-1. Ensure you have added proper unit tests and documentation.
-1. Open a new [pull request][].
+4. Ensure you have added proper unit tests and documentation.
+5. Open a new [pull request][].
+6. The pull request title needs to follow the [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary).
 
-#### Contributing an External Plugin *(experimental)*
-Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code.
+**Note:** If your pull request contains only one commit, that commit also needs to follow the conventional commit format, or the `Semantic Pull Request` check will fail. This is because the check validates the pull request title when there are multiple commits, but validates the commit message itself when there is only one.
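+
+For example, a compliant pull request title might look like the following (the type, scope, and description are purely illustrative):
+
+```text
+fix(inputs.snmp): handle empty responses without panicking
+```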
 
-Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin.
-Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`.
+#### When will your contribution get released?
+
+We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. If your change is a bug fix, it will be released in the next patch release after it is merged to master. If your change is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December.
+
+#### Contributing an External Plugin
+
+Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) plugins without having to change the plugin code.
+
+Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin.
+Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`.
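+
+As a sketch of what the final wiring can look like, an externally-compiled input plugin might be registered in `telegraf.conf` along these lines (the binary path and its flags are hypothetical):
+
+```toml
+[[inputs.execd]]
+  ## command to start the external plugin binary, plus any flags it needs
+  command = ["/usr/local/bin/my-external-plugin", "--config", "/etc/my-plugin.conf"]
+  ## how Telegraf signals the plugin to emit metrics; "none" means the plugin
+  ## collects on its own schedule (see the execd plugin README for the options)
+  signal = "none"
+```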
 
 #### Security Vulnerability Reporting
 InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our
@@ -61,13 +67,30 @@ running.  You can start the containers with:
 ```
 docker-compose up
 ```
 
-And run the full test suite with:
+To run only the integration tests use:
+
+```
+make test-integration
+```
+
+To run the full test suite use:
 
 ```
 make test-all
 ```
 
 Use `make docker-kill` to stop the containers.
 
+### For more developer resources
+
+- [Code Style][codestyle]
+- [Deprecation][deprecation]
+- [Logging][logging]
+- [Metric Format Changes][metricformat]
+- [Packaging][packaging]
+- [Profiling][profiling]
+- [Reviews][reviews]
+- [Sample Config][sample config]
 
 [cla]: https://www.influxdata.com/legal/cla/
 [new issue]: https://github.com/influxdata/telegraf/issues/new/choose
@@ -76,3 +99,11 @@ Use `make docker-kill` to stop the containers.
 [processors]: /docs/PROCESSORS.md
 [aggregators]: /docs/AGGREGATORS.md
 [outputs]: /docs/OUTPUTS.md
+[codestyle]: /docs/developers/CODE_STYLE.md
+[deprecation]: /docs/developers/DEPRECATION.md
+[logging]: /docs/developers/LOGGING.md
+[metricformat]: /docs/developers/METRIC_FORMAT_CHANGES.md
+[packaging]: /docs/developers/PACKAGING.md
+[profiling]: /docs/developers/PROFILING.md
+[reviews]: /docs/developers/REVIEWS.md
+[sample config]: /docs/developers/SAMPLE_CONFIG.md
diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md
index 1aea58dac3070..0de5ae47949d9 100644
--- a/EXTERNAL_PLUGINS.md
+++ b/EXTERNAL_PLUGINS.md
@@ -5,11 +5,30 @@ Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for mor
 Pull requests welcome.
 
-
 ## Inputs
-- [rand](https://github.com/ssoroka/rand) - Generate random numbers
-- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
-- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels
 - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
 - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
+- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Foundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation.
+- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensor data provided by [Open Hardware Monitor](http://openhardwaremonitor.org)
+- [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/).
+- [rand](https://github.com/ssoroka/rand) - Generate random numbers
+- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/).
 - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
+- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
+- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels
+- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Gather meeting information from a [Big Blue Button](https://bigbluebutton.org/) server
+- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather statistics from dnsmasq
+- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees.
+- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files
+- [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC
+- [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test
+- [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors
+- [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather statistics from Oracle RDBMS
+- [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather statistics from DB2 RDBMS
+
+## Outputs
+- [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send to Amazon Kinesis.
+
+## Processors
+- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses.
+
diff --git a/Makefile b/Makefile
index 4dd2754ec0910..52362a307790c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,8 @@
-next_version := 1.16.0
+next_version := $(shell cat build_version.txt)
 tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp)
 branch := $(shell git rev-parse --abbrev-ref HEAD)
 commit := $(shell git rev-parse --short=8 HEAD)
+glibc_version := 2.17
 
 ifdef NIGHTLY
 	version := $(next_version)
@@ -40,7 +41,7 @@ GOOS ?= $(shell go env GOOS)
 GOARCH ?= $(shell go env GOARCH)
 HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go
 
-LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch)
+LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch) -X main.goos=$(GOOS) -X main.goarch=$(GOARCH)
 ifneq ($(tag),)
 	LDFLAGS += -X main.version=$(version)
 endif
@@ -68,21 +69,33 @@ all:
 .PHONY: help
 help:
 	@echo 'Targets:'
-	@echo '  all          - download dependencies and compile telegraf binary'
-	@echo '  deps         - download dependencies'
-	@echo '  telegraf     - compile telegraf binary'
-	@echo '  test         - run short unit tests'
-	@echo '  fmt          - format source files'
-	@echo '  tidy         - tidy go modules'
-	@echo '  check-deps   - check docs/LICENSE_OF_DEPENDENCIES.md'
-	@echo '  clean        - delete build artifacts'
+	@echo '  all            - download dependencies and compile telegraf binary'
+	@echo '  deps           - download dependencies'
+	@echo '  telegraf       - compile telegraf binary'
+	@echo '  test           - run short unit tests'
+	@echo '  fmt            - format source files'
+	@echo '  tidy           - tidy go modules'
+	@echo '  lint           - run linter'
+	@echo '  lint-branch    - run linter on changes in current branch since master'
+	@echo '  lint-install   - install linter'
+	@echo '  check-deps     - check docs/LICENSE_OF_DEPENDENCIES.md'
+	@echo '  clean          - delete build artifacts'
+	@echo '  package        - build all supported packages, override include_packages to only build a subset'
+	@echo '                   e.g.: make package include_packages="amd64.deb"'
 	@echo ''
-	@echo 'Package Targets:'
-	@$(foreach dist,$(dists),echo 
" $(dist)";) + @echo 'Possible values for include_packages variable' + @$(foreach package,$(include_packages),echo " $(package)";) + @echo '' + @echo 'Resulting package name format (where arch will be the arch of the package):' + @echo ' telegraf_$(deb_version)_arch.deb' + @echo ' telegraf-$(rpm_version).arch.rpm' + @echo ' telegraf-$(tar_version)_arch.tar.gz' + @echo ' telegraf-$(tar_version)_arch.zip' + .PHONY: deps deps: - go mod download + go mod download -x .PHONY: telegraf telegraf: @@ -97,6 +110,10 @@ go-install: test: go test -short $(race_detector) ./... +.PHONY: test-integration +test-integration: + go test -run Integration $(race_detector) ./... + .PHONY: fmt fmt: @gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) @@ -111,15 +128,6 @@ fmtcheck: exit 1 ;\ fi -.PHONY: test-windows -test-windows: - go test -short $(race_detector) ./plugins/inputs/ping/... - go test -short $(race_detector) ./plugins/inputs/win_perf_counters/... - go test -short $(race_detector) ./plugins/inputs/win_services/... - go test -short $(race_detector) ./plugins/inputs/procstat/... - go test -short $(race_detector) ./plugins/inputs/ntpq/... - go test -short $(race_detector) ./plugins/processors/port_name/... - .PHONY: vet vet: @echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)' @@ -130,18 +138,50 @@ vet: exit 1; \ fi +.PHONY: lint-install +lint-install: + @echo "Installing golangci-lint" + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 + + @echo "Installing markdownlint" + npm install -g markdownlint-cli + +.PHONY: lint +lint: +ifeq (, $(shell which golangci-lint)) + $(info golangci-lint can't be found, please run: make lint-install) + exit 1 +endif + + golangci-lint run + +ifeq (, $(shell which markdownlint-cli)) + $(info markdownlint-cli can't be found, please run: make lint-install) + exit 1 +endif + + markdownlint-cli + +.PHONY: lint-branch +lint-branch: +ifeq (, $(shell which golangci-lint)) + $(info golangci-lint can't be found, please run: make lint-install) + exit 1 +endif + + golangci-lint run --new-from-rev master + .PHONY: tidy tidy: go mod verify go mod tidy @if ! git diff --quiet go.mod go.sum; then \ - echo "please run go mod tidy and check in changes"; \ + echo "please run go mod tidy and check in changes, you might have to use the same version of Go as the CI"; \ exit 1; \ fi .PHONY: check check: fmtcheck vet - @$(MAKE) --no-print-directory tidy .PHONY: test-all test-all: fmtcheck vet @@ -169,15 +209,10 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." 
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up -.PHONY: ci-1.15 -ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.2 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.2 - -.PHONY: ci-1.14 -ci-1.14: - docker build -t quay.io/influxdb/telegraf-ci:1.14.9 - < scripts/ci-1.14.docker - docker push quay.io/influxdb/telegraf-ci:1.14.9 +.PHONY: ci-1.17 +ci-1.17: + docker build -t quay.io/influxdb/telegraf-ci:1.17.3 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.3 .PHONY: install install: $(buildbin) @@ -191,6 +226,7 @@ install: $(buildbin) @if [ $(GOOS) != "windows" ]; then cp -fv etc/telegraf.conf $(DESTDIR)$(sysconfdir)/telegraf/telegraf.conf$(conf_suffix); fi @if [ $(GOOS) != "windows" ]; then cp -fv etc/logrotate.d/telegraf $(DESTDIR)$(sysconfdir)/logrotate.d; fi @if [ $(GOOS) = "windows" ]; then cp -fv etc/telegraf_windows.conf $(DESTDIR)/telegraf.conf; fi + @if [ $(GOOS) = "linux" ]; then scripts/check-dynamic-glibc-versions.sh $(buildbin) $(glibc_version); fi @if [ $(GOOS) = "linux" ]; then mkdir -pv $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi @if [ $(GOOS) = "linux" ]; then cp -fv scripts/telegraf.service $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi @if [ $(GOOS) = "linux" ]; then cp -fv scripts/init.sh $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi @@ -202,187 +238,176 @@ $(buildbin): @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -debs := telegraf_$(deb_version)_amd64.deb -debs += telegraf_$(deb_version)_arm64.deb -debs += telegraf_$(deb_version)_armel.deb -debs += telegraf_$(deb_version)_armhf.deb -debs += telegraf_$(deb_version)_i386.deb -debs += telegraf_$(deb_version)_mips.deb -debs += telegraf_$(deb_version)_mipsel.deb -debs += telegraf_$(deb_version)_s390x.deb - -rpms += telegraf-$(rpm_version).aarch64.rpm -rpms += telegraf-$(rpm_version).armel.rpm -rpms += telegraf-$(rpm_version).armv6hl.rpm -rpms += telegraf-$(rpm_version).i386.rpm -rpms += telegraf-$(rpm_version).s390x.rpm -rpms += telegraf-$(rpm_version).x86_64.rpm - -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_arm64.tar.gz -tars += telegraf-$(tar_version)_linux_armel.tar.gz -tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_linux_i386.tar.gz -tars += telegraf-$(tar_version)_linux_mips.tar.gz -tars += telegraf-$(tar_version)_linux_mipsel.tar.gz -tars += telegraf-$(tar_version)_linux_s390x.tar.gz -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz - -zips += telegraf-$(tar_version)_windows_amd64.zip -zips += telegraf-$(tar_version)_windows_i386.zip - -dists := $(debs) $(rpms) $(tars) $(zips) +# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages +# e.g. 
make package include_packages="$(make amd64)" +mips += linux_mips.tar.gz mips.deb +.PHONY: mips +mips: + @ echo $(mips) +mipsel += mipsel.deb linux_mipsel.tar.gz +.PHONY: mipsel +mipsel: + @ echo $(mipsel) +arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm +.PHONY: arm64 +arm64: + @ echo $(arm64) +amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm +.PHONY: amd64 +amd64: + @ echo $(amd64) +static += static_linux_amd64.tar.gz +.PHONY: static +static: + @ echo $(static) +armel += linux_armel.tar.gz armel.rpm armel.deb +.PHONY: armel +armel: + @ echo $(armel) +armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm +.PHONY: armhf +armhf: + @ echo $(armhf) +s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: s390x +s390x: + @ echo $(s390x) +ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb +.PHONY: ppc64le +ppc64le: + @ echo $(ppc64le) +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm +.PHONY: i386 +i386: + @ echo $(i386) +windows += windows_i386.zip windows_amd64.zip +.PHONY: windows +windows: + @ echo $(windows) +darwin += darwin_amd64.tar.gz +.PHONY: darwin +darwin: + @ echo $(darwin) + +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin) .PHONY: package -package: $(dists) - -rpm_amd64 := amd64 -rpm_386 := i386 -rpm_s390x := s390x -rpm_arm5 := armel -rpm_arm6 := armv6hl -rpm_arm647 := aarch64 -rpm_arch = $(rpm_$(GOARCH)$(GOARM)) - -.PHONY: $(rpms) -$(rpms): - @$(MAKE) install - @mkdir -p $(pkgdir) - fpm --force \ - --log info \ - --architecture $(rpm_arch) \ - --input-type dir \ - --output-type rpm \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/rpm/post-install.sh \ - --before-install scripts/rpm/pre-install.sh \ - --after-remove scripts/rpm/post-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --depends coreutils \ - --depends shadow-utils \ - --rpm-posttrans scripts/rpm/post-install.sh \ - --name telegraf \ - --version $(version) \ - --iteration $(rpm_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -deb_amd64 := amd64 -deb_386 := i386 -deb_s390x := s390x -deb_arm5 := armel -deb_arm6 := armhf -deb_arm647 := arm64 -deb_mips := mips -deb_mipsle := mipsel -deb_arch = $(deb_$(GOARCH)$(GOARM)) - -.PHONY: $(debs) -$(debs): - @$(MAKE) install - @mkdir -pv $(pkgdir) - fpm --force \ - --log info \ - --architecture $(deb_arch) \ - --input-type dir \ - --output-type deb \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf.sample \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/deb/post-install.sh \ - --before-install scripts/deb/pre-install.sh \ - --after-remove scripts/deb/post-remove.sh \ - --before-remove scripts/deb/pre-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." 
\ - --name telegraf \ - --version $(version) \ - --iteration $(deb_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -.PHONY: $(zips) -$(zips): - @$(MAKE) install - @mkdir -p $(pkgdir) - (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@ +package: $(include_packages) -.PHONY: $(tars) -$(tars): +.PHONY: $(include_packages) +$(include_packages): @$(MAKE) install @mkdir -p $(pkgdir) - tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) . -.PHONY: upload-nightly -upload-nightly: - aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \ - --exclude "*" \ - --include "*.tar.gz" \ - --include "*.deb" \ - --include "*.rpm" \ - --include "*.zip" \ - --acl public-read + @if [ "$(suffix $@)" = ".rpm" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type rpm \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/rpm/post-install.sh \ + --before-install scripts/rpm/pre-install.sh \ + --after-remove scripts/rpm/post-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --depends coreutils \ + --depends shadow-utils \ + --rpm-posttrans scripts/rpm/post-install.sh \ + --name telegraf \ + --version $(version) \ + --iteration $(rpm_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf-$(rpm_version).$@ ;\ + elif [ "$(suffix $@)" = ".deb" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type deb \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf.sample \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/deb/post-install.sh \ + --before-install scripts/deb/pre-install.sh \ + --after-remove scripts/deb/post-remove.sh \ + --before-remove scripts/deb/pre-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --name telegraf \ + --version $(version) \ + --iteration $(deb_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf_$(deb_version)_$@ ;\ + elif [ "$(suffix $@)" = ".zip" ]; then \ + (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\ + elif [ "$(suffix $@)" = ".gz" ]; then \ + tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . 
;\ + fi + +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 + +static_linux_amd64.tar.gz: export cgo := -nocgo +static_linux_amd64.tar.gz: export CGO_ENABLED := 0 -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64 +i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux +i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386 -%static_linux_amd64.tar.gz: export cgo := -nocgo -%static_linux_amd64.tar.gz: export CGO_ENABLED := 0 +armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux +armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm +armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5 -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386 +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6 -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7 -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6 +mips.deb linux_mips.tar.gz: export GOOS := linux +mips.deb linux_mips.tar.gz: export GOARCH := mips -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARM := 7 +mipsel.deb linux_mipsel.tar.gz: export GOOS := linux +mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle -%mips.deb %linux_mips.tar.gz: export GOOS := linux -%mips.deb %linux_mips.tar.gz: export GOARCH := mips +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x -%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux -%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +freebsd_amd64.tar.gz: export GOOS := freebsd +freebsd_amd64.tar.gz: export GOARCH := amd64 -%freebsd_amd64.tar.gz: export GOOS := freebsd -%freebsd_amd64.tar.gz: export GOARCH := amd64 +freebsd_i386.tar.gz: export GOOS := freebsd +freebsd_i386.tar.gz: export GOARCH := 386 -%freebsd_i386.tar.gz: export GOOS := freebsd -%freebsd_i386.tar.gz: export GOARCH := 386 +freebsd_armv7.tar.gz: export GOOS := freebsd +freebsd_armv7.tar.gz: export GOARCH := arm +freebsd_armv7.tar.gz: export GOARM := 7 -%windows_amd64.zip: export GOOS := windows -%windows_amd64.zip: export GOARCH := amd64 +windows_amd64.zip: export GOOS := windows +windows_amd64.zip: export GOARCH := amd64 -%darwin_amd64.tar.gz: export GOOS := darwin -%darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_amd64.tar.gz: export GOOS := darwin +darwin_amd64.tar.gz: export GOARCH := amd64 
-%windows_i386.zip: export GOOS := windows
-%windows_i386.zip: export GOARCH := 386
+windows_i386.zip: export GOOS := windows
+windows_i386.zip: export GOARCH := 386

-%windows_i386.zip %windows_amd64.zip: export prefix =
-%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix)
-%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix)
-%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix)
-%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe
+windows_i386.zip windows_amd64.zip: export prefix =
+windows_i386.zip windows_amd64.zip: export bindir = $(prefix)
+windows_i386.zip windows_amd64.zip: export sysconfdir = $(prefix)
+windows_i386.zip windows_amd64.zip: export localstatedir = $(prefix)
+windows_i386.zip windows_amd64.zip: export EXEEXT := .exe

 %.deb: export pkg := deb
 %.deb: export prefix := /usr
diff --git a/README.md b/README.md
index 168db50fd6a24..122b20839db6b 100644
--- a/README.md
+++ b/README.md
@@ -1,39 +1,28 @@
-# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
-[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack)
-
-Telegraf is an agent for collecting, processing, aggregating, and writing metrics.
-
-Design goals are to have a minimal memory footprint with a plugin system so
-that developers in the community can easily add support for collecting
-metrics.
-Telegraf is plugin-driven and has the concept of 4 distinct plugin types:
+# Telegraf

-1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
-2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
-3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
-4. [Output Plugins](#output-plugins) write metrics to various destinations
+![tiger](TelegrafTiger.png "tiger")

-New plugins are designed to be easy to contribute, pull requests are welcomed
-and we work to incorporate as many pull requests as possible.
-
-## Try in Browser :rocket:
+[![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
+[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack)

-You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/).
+Telegraf is an agent for collecting, processing, aggregating, and writing metrics. It is based on a
+plugin system that enables developers in the community to easily add support for additional
+metric collection. There are four distinct types of plugins:

-## Contributing
+1. [Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs
+2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics
+3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
+4. 
[Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations -There are many ways to contribute: -- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) -- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) -- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) -- [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)* +New plugins are designed to be easy to contribute, pull requests are welcomed, and we work to +incorporate as many pull requests as possible. Consider looking at the +[list of external plugins](EXTERNAL_PLUGINS.md) as well. ## Minimum Requirements Telegraf shares the same [minimum requirements][] as Go: + - Linux kernel version 2.6.23 or later - Windows 7 or later - FreeBSD 11.2 or later @@ -41,408 +30,133 @@ Telegraf shares the same [minimum requirements][] as Go: [minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements -## Installation: +## Obtaining Telegraf + +View the [changelog](/CHANGELOG.md) for the latest updates and changes by version. + +### Binary Downloads + +Binary downloads are available from the [InfluxData downloads](https://www.influxdata.com/downloads) +page or from each [GitHub Releases](https://github.com/influxdata/telegraf/releases) page. + +### Package Repository + +InfluxData also provides a package repo that contains both DEB and RPM downloads. -You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page -or from the [releases](https://github.com/influxdata/telegraf/releases) section. +For deb-based platforms (e.g. Ubuntu and Debian) run the following to add the +repo key and setup a new sources.list entry: -### Ansible Role: +```shell +wget -qO- https://repos.influxdata.com/influxdb.key | sudo tee /etc/apt/trusted.gpg.d/influxdb.asc >/dev/null +source /etc/os-release +echo "deb https://repos.influxdata.com/${ID} ${VERSION_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +sudo apt-get update && sudo apt-get install telegraf +``` -Ansible role: https://github.com/rossmcdonald/telegraf +For RPM-based platforms (e.g. RHEL, CentOS) use the following to create a repo +file and install telegraf: + +```shell +cat <=1.13 (1.15 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.17 (1.17.2 recommended) 2. Clone the Telegraf repository: - ``` - cd ~/src + + ```shell git clone https://github.com/influxdata/telegraf.git ``` + 3. Run `make` from the source directory - ``` - cd ~/src/telegraf + + ```shell + cd telegraf make ``` -### Changelog +### Nightly Builds -View the [changelog](/CHANGELOG.md) for the latest updates and changes by -version. +[Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. 
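Since the explicit nightly download list below is being replaced by a pointer to docs/NIGHTLIES.md, a short sketch of consuming a nightly may still help. It assumes the dl.influxdata.com URL pattern from the removed list and an archive that mirrors the /usr install prefix used by the packaging Makefile:

```shell
# Fetch and unpack a nightly linux/amd64 tarball (URL pattern taken from
# the list removed below; see docs/NIGHTLIES.md for current locations).
curl -LO https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz
tar -xzf telegraf-nightly_linux_amd64.tar.gz
# Assumes the archive mirrors the install prefix, so the binary lands in ./usr/bin.
./usr/bin/telegraf --version
```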
-### Nightly Builds
+### 3rd Party Builds

-These builds are generated from the master branch:
-- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz)
-- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
-- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
-- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
-- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
-- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
-- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
-- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
-- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
-- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
-- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
-- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
-- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
-- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
-- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
-- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
-- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
-- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
-- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
-- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
-- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
-- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
-- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
-- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
-
-## How to use it:
+Builds for other platforms or package formats are provided by members of the Telegraf community.
+These packages are not built, tested, or supported by the Telegraf project or InfluxData. 
Please +get in touch with the package author if support is needed: + +- [Ansible Role](https://github.com/rossmcdonald/telegraf) +- [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) +- [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +- [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) + +## Getting Started See usage with: -``` +```shell telegraf --help ``` -#### Generate a telegraf config file: +### Generate a telegraf config file -``` +```shell telegraf config > telegraf.conf ``` -#### Generate config with only cpu input & influxdb output plugins defined: +### Generate config with only cpu input & influxdb output plugins defined -``` +```shell telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputting metrics to stdout: +### Run a single telegraf collection, outputting metrics to stdout -``` +```shell telegraf --config telegraf.conf --test ``` -#### Run telegraf with all plugins defined in config file: +### Run telegraf with all plugins defined in config file -``` +```shell telegraf --config telegraf.conf ``` -#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: +### Run telegraf, enabling the cpu & memory input, and influxdb output plugins -``` +```shell telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ``` ## Documentation -[Latest Release Documentation][release docs]. - -For documentation on the latest development code see the [documentation index][devel docs]. - -[release docs]: https://docs.influxdata.com/telegraf -[devel docs]: docs - -## Input Plugins - -* [activemq](./plugins/inputs/activemq) -* [aerospike](./plugins/inputs/aerospike) -* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) -* [apache](./plugins/inputs/apache) -* [apcupsd](./plugins/inputs/apcupsd) -* [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) -* [azure_storage_queue](./plugins/inputs/azure_storage_queue) -* [bcache](./plugins/inputs/bcache) -* [beanstalkd](./plugins/inputs/beanstalkd) -* [bind](./plugins/inputs/bind) -* [bond](./plugins/inputs/bond) -* [burrow](./plugins/inputs/burrow) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [ceph](./plugins/inputs/ceph) -* [cgroup](./plugins/inputs/cgroup) -* [chrony](./plugins/inputs/chrony) -* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi)) -* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) -* [clickhouse](./plugins/inputs/clickhouse) -* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub -* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint -* [conntrack](./plugins/inputs/conntrack) -* [consul](./plugins/inputs/consul) -* [couchbase](./plugins/inputs/couchbase) -* [couchdb](./plugins/inputs/couchdb) -* [cpu](./plugins/inputs/cpu) -* [DC/OS](./plugins/inputs/dcos) -* [diskio](./plugins/inputs/diskio) -* [disk](./plugins/inputs/disk) -* [disque](./plugins/inputs/disque) -* [dmcache](./plugins/inputs/dmcache) -* [dns query time](./plugins/inputs/dns_query) -* [docker](./plugins/inputs/docker) -* [docker_log](./plugins/inputs/docker_log) -* [dovecot](./plugins/inputs/dovecot) -* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) -* 
[elasticsearch](./plugins/inputs/elasticsearch) -* [ethtool](./plugins/inputs/ethtool) -* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) -* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) -* [execd](./plugins/inputs/execd) (generic executable "daemon" processes) -* [fail2ban](./plugins/inputs/fail2ban) -* [fibaro](./plugins/inputs/fibaro) -* [file](./plugins/inputs/file) -* [filestat](./plugins/inputs/filestat) -* [filecount](./plugins/inputs/filecount) -* [fireboard](/plugins/inputs/fireboard) -* [fluentd](./plugins/inputs/fluentd) -* [github](./plugins/inputs/github) -* [gnmi](./plugins/inputs/gnmi) -* [graylog](./plugins/inputs/graylog) -* [haproxy](./plugins/inputs/haproxy) -* [hddtemp](./plugins/inputs/hddtemp) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) -* [http_listener_v2](./plugins/inputs/http_listener_v2) -* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) -* [http_response](./plugins/inputs/http_response) -* [icinga2](./plugins/inputs/icinga2) -* [infiniband](./plugins/inputs/infiniband) -* [influxdb](./plugins/inputs/influxdb) -* [influxdb_listener](./plugins/inputs/influxdb_listener) -* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) -* [intel_rdt](./plugins/inputs/intel_rdt) -* [internal](./plugins/inputs/internal) -* [interrupts](./plugins/inputs/interrupts) -* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [ipset](./plugins/inputs/ipset) -* [iptables](./plugins/inputs/iptables) -* [ipvs](./plugins/inputs/ipvs) -* [jenkins](./plugins/inputs/jenkins) -* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [kapacitor](./plugins/inputs/kapacitor) -* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) -* [kernel](./plugins/inputs/kernel) -* [kernel_vmstat](./plugins/inputs/kernel_vmstat) -* [kibana](./plugins/inputs/kibana) -* [kubernetes](./plugins/inputs/kubernetes) -* [kube_inventory](./plugins/inputs/kube_inventory) -* [lanz](./plugins/inputs/lanz) -* [leofs](./plugins/inputs/leofs) -* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) -* [logstash](./plugins/inputs/logstash) -* [lustre2](./plugins/inputs/lustre2) -* [mailchimp](./plugins/inputs/mailchimp) -* [marklogic](./plugins/inputs/marklogic) -* [mcrouter](./plugins/inputs/mcrouter) -* [memcached](./plugins/inputs/memcached) -* [mem](./plugins/inputs/mem) -* [mesos](./plugins/inputs/mesos) -* [minecraft](./plugins/inputs/minecraft) -* [modbus](./plugins/inputs/modbus) -* [mongodb](./plugins/inputs/mongodb) -* [monit](./plugins/inputs/monit) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [multifile](./plugins/inputs/multifile) -* [mysql](./plugins/inputs/mysql) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nats](./plugins/inputs/nats) -* [neptune_apex](./plugins/inputs/neptune_apex) -* [net](./plugins/inputs/net) -* [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/net) -* [nginx](./plugins/inputs/nginx) -* 
[nginx_plus_api](./plugins/inputs/nginx_plus_api) -* [nginx_plus](./plugins/inputs/nginx_plus) -* [nginx_sts](./plugins/inputs/nginx_sts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nsd](./plugins/inputs/nsd) -* [nsq_consumer](./plugins/inputs/nsq_consumer) -* [nsq](./plugins/inputs/nsq) -* [nstat](./plugins/inputs/nstat) -* [ntpq](./plugins/inputs/ntpq) -* [nvidia_smi](./plugins/inputs/nvidia_smi) -* [opcua](./plugins/inputs/opcua) -* [openldap](./plugins/inputs/openldap) -* [openntpd](./plugins/inputs/openntpd) -* [opensmtpd](./plugins/inputs/opensmtpd) -* [openweathermap](./plugins/inputs/openweathermap) -* [pf](./plugins/inputs/pf) -* [pgbouncer](./plugins/inputs/pgbouncer) -* [phpfpm](./plugins/inputs/phpfpm) -* [phusion passenger](./plugins/inputs/passenger) -* [ping](./plugins/inputs/ping) -* [postfix](./plugins/inputs/postfix) -* [postgresql_extensible](./plugins/inputs/postgresql_extensible) -* [postgresql](./plugins/inputs/postgresql) -* [powerdns](./plugins/inputs/powerdns) -* [powerdns_recursor](./plugins/inputs/powerdns_recursor) -* [processes](./plugins/inputs/processes) -* [procstat](./plugins/inputs/procstat) -* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) -* [proxmox](./plugins/inputs/proxmox) -* [puppetagent](./plugins/inputs/puppetagent) -* [rabbitmq](./plugins/inputs/rabbitmq) -* [raindrops](./plugins/inputs/raindrops) -* [ras](./plugins/inputs/ras) -* [redfish](./plugins/inputs/redfish) -* [redis](./plugins/inputs/redis) -* [rethinkdb](./plugins/inputs/rethinkdb) -* [riak](./plugins/inputs/riak) -* [salesforce](./plugins/inputs/salesforce) -* [sensors](./plugins/inputs/sensors) -* [sflow](./plugins/inputs/sflow) -* [smart](./plugins/inputs/smart) -* [snmp_legacy](./plugins/inputs/snmp_legacy) -* [snmp](./plugins/inputs/snmp) -* [snmp_trap](./plugins/inputs/snmp_trap) -* [socket_listener](./plugins/inputs/socket_listener) -* [solr](./plugins/inputs/solr) -* [sql server](./plugins/inputs/sqlserver) (microsoft) -* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) -* [statsd](./plugins/inputs/statsd) -* [suricata](./plugins/inputs/suricata) -* [swap](./plugins/inputs/swap) -* [synproxy](./plugins/inputs/synproxy) -* [syslog](./plugins/inputs/syslog) -* [sysstat](./plugins/inputs/sysstat) -* [systemd_units](./plugins/inputs/systemd_units) -* [system](./plugins/inputs/system) -* [tail](./plugins/inputs/tail) -* [temp](./plugins/inputs/temp) -* [tcp_listener](./plugins/inputs/socket_listener) -* [teamspeak](./plugins/inputs/teamspeak) -* [tengine](./plugins/inputs/tengine) -* [tomcat](./plugins/inputs/tomcat) -* [twemproxy](./plugins/inputs/twemproxy) -* [udp_listener](./plugins/inputs/socket_listener) -* [unbound](./plugins/inputs/unbound) -* [uwsgi](./plugins/inputs/uwsgi) -* [varnish](./plugins/inputs/varnish) -* [vsphere](./plugins/inputs/vsphere) VMware vSphere -* [webhooks](./plugins/inputs/webhooks) - * [filestack](./plugins/inputs/webhooks/filestack) - * [github](./plugins/inputs/webhooks/github) - * [mandrill](./plugins/inputs/webhooks/mandrill) - * [papertrail](./plugins/inputs/webhooks/papertrail) - * [particle](./plugins/inputs/webhooks/particle) - * [rollbar](./plugins/inputs/webhooks/rollbar) -* [win_eventlog](./plugins/inputs/win_eventlog) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* 
[wireguard](./plugins/inputs/wireguard) -* [wireless](./plugins/inputs/wireless) -* [x509_cert](./plugins/inputs/x509_cert) -* [zfs](./plugins/inputs/zfs) -* [zipkin](./plugins/inputs/zipkin) -* [zookeeper](./plugins/inputs/zookeeper) - -## Parsers - -- [InfluxDB Line Protocol](/plugins/parsers/influx) -- [Collectd](/plugins/parsers/collectd) -- [CSV](/plugins/parsers/csv) -- [Dropwizard](/plugins/parsers/dropwizard) -- [FormUrlencoded](/plugins/parser/form_urlencoded) -- [Graphite](/plugins/parsers/graphite) -- [Grok](/plugins/parsers/grok) -- [JSON](/plugins/parsers/json) -- [Logfmt](/plugins/parsers/logfmt) -- [Nagios](/plugins/parsers/nagios) -- [Value](/plugins/parsers/value), ie: 45 or "booyah" -- [Wavefront](/plugins/parsers/wavefront) - -## Serializers - -- [InfluxDB Line Protocol](/plugins/serializers/influx) -- [JSON](/plugins/serializers/json) -- [Graphite](/plugins/serializers/graphite) -- [ServiceNow](/plugins/serializers/nowmetric) -- [SplunkMetric](/plugins/serializers/splunkmetric) -- [Carbon2](/plugins/serializers/carbon2) -- [Wavefront](/plugins/serializers/wavefront) - -## Processor Plugins - -* [clone](/plugins/processors/clone) -* [converter](/plugins/processors/converter) -* [date](/plugins/processors/date) -* [dedup](/plugins/processors/dedup) -* [defaults](/plugins/processors/defaults) -* [enum](/plugins/processors/enum) -* [execd](/plugins/processors/execd) -* [ifname](/plugins/processors/ifname) -* [filepath](/plugins/processors/filepath) -* [override](/plugins/processors/override) -* [parser](/plugins/processors/parser) -* [pivot](/plugins/processors/pivot) -* [port_name](/plugins/processors/port_name) -* [printer](/plugins/processors/printer) -* [regex](/plugins/processors/regex) -* [rename](/plugins/processors/rename) -* [reverse_dns](/plugins/processors/reverse_dns) -* [s2geo](/plugins/processors/s2geo) -* [starlark](/plugins/processors/starlark) -* [strings](/plugins/processors/strings) -* [tag_limit](/plugins/processors/tag_limit) -* [template](/plugins/processors/template) -* [topk](/plugins/processors/topk) -* [unpivot](/plugins/processors/unpivot) - -## Aggregator Plugins - -* [basicstats](./plugins/aggregators/basicstats) -* [final](./plugins/aggregators/final) -* [histogram](./plugins/aggregators/histogram) -* [merge](./plugins/aggregators/merge) -* [minmax](./plugins/aggregators/minmax) -* [valuecounter](./plugins/aggregators/valuecounter) - -## Output Plugins - -* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) -* [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) (rabbitmq) -* [application_insights](./plugins/outputs/application_insights) -* [aws kinesis](./plugins/outputs/kinesis) -* [aws cloudwatch](./plugins/outputs/cloudwatch) -* [azure_monitor](./plugins/outputs/azure_monitor) -* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub -* [cratedb](./plugins/outputs/cratedb) -* [datadog](./plugins/outputs/datadog) -* [discard](./plugins/outputs/discard) -* [dynatrace](./plugins/outputs/dynatrace) -* [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/outputs/exec) -* [execd](./plugins/outputs/execd) -* [file](./plugins/outputs/file) -* [graphite](./plugins/outputs/graphite) -* [graylog](./plugins/outputs/graylog) -* [health](./plugins/outputs/health) -* [http](./plugins/outputs/http) -* [instrumental](./plugins/outputs/instrumental) -* [kafka](./plugins/outputs/kafka) -* 
[librato](./plugins/outputs/librato) -* [mqtt](./plugins/outputs/mqtt) -* [nats](./plugins/outputs/nats) -* [newrelic](./plugins/outputs/newrelic) -* [nsq](./plugins/outputs/nsq) -* [opentsdb](./plugins/outputs/opentsdb) -* [prometheus](./plugins/outputs/prometheus_client) -* [riemann](./plugins/outputs/riemann) -* [riemann_legacy](./plugins/outputs/riemann_legacy) -* [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) -* [syslog](./plugins/outputs/syslog) -* [tcp](./plugins/outputs/socket_writer) -* [udp](./plugins/outputs/socket_writer) -* [warp10](./plugins/outputs/warp10) -* [wavefront](./plugins/outputs/wavefront) -* [sumologic](./plugins/outputs/sumologic) +[Latest Release Documentation](https://docs.influxdata.com/telegraf/latest/) + +For documentation on the latest development code see the [documentation index](/docs). + +- [Input Plugins](/docs/INPUTS.md) +- [Output Plugins](/docs/OUTPUTS.md) +- [Processor Plugins](/docs/PROCESSORS.md) +- [Aggregator Plugins](/docs/AGGREGATORS.md) + +## Contributing + +There are many ways to contribute: + +- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) +- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) +- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) +- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) +- [Contribute plugins](CONTRIBUTING.md) +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000000..1d74711aa9079 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,6 @@ +# Security Policy + +## Reporting a Vulnerability + +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, +please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found [here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). diff --git a/TelegrafTiger.png b/TelegrafTiger.png new file mode 100644 index 0000000000000..f6765a8d77b42 Binary files /dev/null and b/TelegrafTiger.png differ diff --git a/agent/accumulator.go b/agent/accumulator.go index 65000fd98a541..3683b6767d47f 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -90,10 +90,7 @@ func (ac *accumulator) addFields( tp telegraf.ValueType, t ...time.Time, ) { - m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp) - if err != nil { - return - } + m := metric.New(measurement, tags, fields, ac.getTime(t), tp) if m := ac.maker.MakeMetric(m); m != nil { ac.metrics <- m } diff --git a/agent/agent.go b/agent/agent.go index e7ffee322ff20..7bd6b108df048 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -98,8 +98,8 @@ type outputUnit struct { func (a *Agent) Run(ctx context.Context) error { log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ "Flush Interval:%s", - a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) + time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, + a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) log.Printf("D! 
[agent] Initializing plugins") err := a.initPlugins() @@ -126,10 +126,7 @@ func (a *Agent) Run(ctx context.Context) error { } } - next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(aggC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -149,29 +146,20 @@ func (a *Agent) Run(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - err := a.runOutputs(ou) - if err != nil { - log.Printf("E! [agent] Error running outputs: %v", err) - } + a.runOutputs(ou) }() if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -179,20 +167,14 @@ func (a *Agent) Run(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.runInputs(ctx, startTime, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.runInputs(ctx, startTime, iu) }() wg.Wait() @@ -288,23 +270,23 @@ func (a *Agent) runInputs( ctx context.Context, startTime time.Time, unit *inputUnit, -) error { +) { var wg sync.WaitGroup for _, input := range unit.inputs { // Overwrite agent interval if this plugin has its own. - interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // Overwrite agent precision if this plugin has its own. - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } // Overwrite agent collection_jitter if this plugin has its own. - jitter := a.Config.Agent.CollectionJitter.Duration + jitter := time.Duration(a.Config.Agent.CollectionJitter) if input.Config.CollectionJitter != 0 { jitter = input.Config.CollectionJitter } @@ -334,8 +316,6 @@ func (a *Agent) runInputs( close(unit.dst) log.Printf("D! [agent] Input channel closed") - - return nil } // testStartInputs is a variation of startInputs for use in --test and --once @@ -344,7 +324,7 @@ func (a *Agent) runInputs( func (a *Agent) testStartInputs( dst chan<- telegraf.Metric, inputs []*models.RunningInput, -) (*inputUnit, error) { +) *inputUnit { log.Printf("D! [agent] Starting service inputs") unit := &inputUnit{ @@ -364,13 +344,12 @@ func (a *Agent) testStartInputs( if err != nil { log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err) } - } unit.inputs = append(unit.inputs, input) } - return unit, nil + return unit } // testRunInputs is a variation of runInputs for use in --test and --once mode. @@ -379,7 +358,7 @@ func (a *Agent) testRunInputs( ctx context.Context, wait time.Duration, unit *inputUnit, -) error { +) { var wg sync.WaitGroup nul := make(chan telegraf.Metric) @@ -394,13 +373,13 @@ func (a *Agent) testRunInputs( defer wg.Done() // Overwrite agent interval if this plugin has its own. 
- interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // Overwrite agent precision if this plugin has its own. - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } @@ -435,7 +414,6 @@ func (a *Agent) testRunInputs( close(unit.dst) log.Printf("D! [agent] Input channel closed") - return nil } // stopServiceInputs stops all service inputs. @@ -447,6 +425,13 @@ func stopServiceInputs(inputs []*models.RunningInput) { } } +// stopRunningOutputs stops all running outputs. +func stopRunningOutputs(outputs []*models.RunningOutput) { + for _, output := range outputs { + output.Close() + } +} + // gather runs an input's gather function periodically until the context is // done. func (a *Agent) gatherLoop( @@ -547,7 +532,7 @@ func (a *Agent) startProcessors( // closed and all metrics have been written. func (a *Agent) runProcessors( units []*processorUnit, -) error { +) { var wg sync.WaitGroup for _, unit := range units { wg.Add(1) @@ -567,8 +552,6 @@ func (a *Agent) runProcessors( }(unit) } wg.Wait() - - return nil } // startAggregators sets up the aggregator unit and returns the source channel. @@ -576,7 +559,7 @@ func (a *Agent) startAggregators( aggC chan<- telegraf.Metric, outputC chan<- telegraf.Metric, aggregators []*models.RunningAggregator, -) (chan<- telegraf.Metric, *aggregatorUnit, error) { +) (chan<- telegraf.Metric, *aggregatorUnit) { src := make(chan telegraf.Metric, 100) unit := &aggregatorUnit{ src: src, @@ -584,7 +567,7 @@ func (a *Agent) startAggregators( outputC: outputC, aggregators: aggregators, } - return src, unit, nil + return src, unit } // runAggregators beings aggregating metrics and runs until the source channel @@ -592,7 +575,7 @@ func (a *Agent) startAggregators( func (a *Agent) runAggregators( startTime time.Time, unit *aggregatorUnit, -) error { +) { ctx, cancel := context.WithCancel(context.Background()) // Before calling Add, initialize the aggregation window. This ensures @@ -628,8 +611,8 @@ func (a *Agent) runAggregators( go func(agg *models.RunningAggregator) { defer wg.Done() - interval := a.Config.Agent.Interval.Duration - precision := a.Config.Agent.Precision.Duration + interval := time.Duration(a.Config.Agent.Interval) + precision := time.Duration(a.Config.Agent.Precision) acc := NewAccumulator(agg, unit.aggC) acc.SetPrecision(getPrecision(precision, interval)) @@ -644,8 +627,6 @@ func (a *Agent) runAggregators( // processor chain will close the outputC when it finishes processing. close(unit.aggC) log.Printf("D! [agent] Aggregator channel closed") - - return nil } func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { @@ -738,12 +719,12 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) // written one last time and dropped if unsuccessful. func (a *Agent) runOutputs( unit *outputUnit, -) error { +) { var wg sync.WaitGroup // Start flush loop - interval := a.Config.Agent.FlushInterval.Duration - jitter := a.Config.Agent.FlushJitter.Duration + interval := time.Duration(a.Config.Agent.FlushInterval) + jitter := time.Duration(a.Config.Agent.FlushJitter) ctx, cancel := context.WithCancel(context.Background()) @@ -785,7 +766,8 @@ func (a *Agent) runOutputs( cancel() wg.Wait() - return nil + log.Println("I! 
[agent] Stopping running outputs") + stopRunningOutputs(unit.outputs) } // flushLoop runs an output's flush function periodically until the context is @@ -919,10 +901,7 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel } } - next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(procC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -933,30 +912,20 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel } } - iu, err := a.testStartInputs(next, a.Config.Inputs) - if err != nil { - return err - } + iu := a.testStartInputs(next, a.Config.Inputs) var wg sync.WaitGroup - if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -964,20 +933,14 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.testRunInputs(ctx, wait, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.testRunInputs(ctx, wait, iu) }() wg.Wait() @@ -1037,10 +1000,7 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { } } - next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(procC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -1051,38 +1011,26 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { } } - iu, err := a.testStartInputs(next, a.Config.Inputs) - if err != nil { - return err - } + iu := a.testStartInputs(next, a.Config.Inputs) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - err := a.runOutputs(ou) - if err != nil { - log.Printf("E! [agent] Error running outputs: %v", err) - } + a.runOutputs(ou) }() if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -1090,20 +1038,14 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.testRunInputs(ctx, wait, iu) - if err != nil { - log.Printf("E! 
[agent] Error running inputs: %v", err) - } + a.testRunInputs(ctx, wait, iu) }() wg.Wait() diff --git a/agent/agent_posix.go b/agent/agent_posix.go index 09552cac07026..e43c3a7817a88 100644 --- a/agent/agent_posix.go +++ b/agent/agent_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package agent diff --git a/agent/agent_windows.go b/agent/agent_windows.go index 94ed9d006acb2..3196dc70e78e2 100644 --- a/agent/agent_windows.go +++ b/agent/agent_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package agent diff --git a/agent/tick.go b/agent/tick.go index 91b99712a73b4..16233ba6d4adb 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -9,8 +9,6 @@ import ( "github.com/influxdata/telegraf/internal" ) -type empty struct{} - type Ticker interface { Elapsed() <-chan time.Time Stop() diff --git a/agent/tick_test.go b/agent/tick_test.go index 5b8db7e93d4c6..69bf0c2affa39 100644 --- a/agent/tick_test.go +++ b/agent/tick_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -var format = "2006-01-02T15:04:05.999Z07:00" - func TestAlignedTicker(t *testing.T) { interval := 10 * time.Second jitter := 0 * time.Second @@ -249,7 +247,7 @@ func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { for !clock.Now().After(until) { select { case tm := <-ticker.Elapsed(): - dist.Buckets[tm.Second()] += 1 + dist.Buckets[tm.Second()]++ dist.Count++ dist.Waittime += tm.Sub(last).Seconds() last = tm diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index b454c8dc8d9dd..0000000000000 --- a/appveyor.yml +++ /dev/null @@ -1,35 +0,0 @@ -version: "{build}" - -image: Visual Studio 2019 - -cache: - - C:\gopath\pkg\mod -> go.sum - - C:\ProgramData\chocolatey\bin -> appveyor.yml - - C:\ProgramData\chocolatey\lib -> appveyor.yml - -clone_folder: C:\gopath\src\github.com\influxdata\telegraf - -environment: - GOPATH: C:\gopath - -stack: go 1.14 - -platform: x64 - -install: - - choco install make - - cd "%GOPATH%\src\github.com\influxdata\telegraf" - - git config --system core.longpaths true - - go version - - go env - -build_script: - - make deps - - make telegraf - -test_script: - - make check - - make test-windows - -artifacts: - - path: telegraf.exe diff --git a/assets/icon.icns b/assets/icon.icns new file mode 100644 index 0000000000000..339a8daefdc24 Binary files /dev/null and b/assets/icon.icns differ diff --git a/build_version.txt b/build_version.txt new file mode 100644 index 0000000000000..0bd54efd31633 --- /dev/null +++ b/build_version.txt @@ -0,0 +1 @@ +1.20.4 diff --git a/cmd/telegraf/README.md b/cmd/telegraf/README.md new file mode 120000 index 0000000000000..162972fc44d34 --- /dev/null +++ b/cmd/telegraf/README.md @@ -0,0 +1 @@ +../../docs/COMMANDS_AND_FLAGS.md \ No newline at end of file diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 7e0b4ec1ca67a..688c1e5bdd6c5 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -15,6 +15,8 @@ import ( "syscall" "time" + "github.com/influxdata/tail/watch" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -26,8 +28,21 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" _ "github.com/influxdata/telegraf/plugins/processors/all" + "gopkg.in/tomb.v1" ) +type sliceFlags []string + +func (i *sliceFlags) String() string { + s := strings.Join(*i, " ") + return "[" + s + "]" +} + +func (i 
*sliceFlags) Set(value string) error { + *i = append(*i, value) + return nil +} + // If you update these, update usage.go and usage_windows.go var fDebug = flag.Bool("debug", false, "turn on debug logging") @@ -37,9 +52,10 @@ var fQuiet = flag.Bool("quiet", false, "run in quiet mode") var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. Note: Test mode only runs inputs, not processors, aggregators, or outputs") var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode") -var fConfig = flag.String("config", "", "configuration file to load") -var fConfigDirectory = flag.String("config-directory", "", - "directory containing additional *.conf files") + +var fConfigs sliceFlags +var fConfigDirs sliceFlags +var fWatchConfig = flag.String("watch-config", "", "Monitoring config changes [notify, poll]") var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") @@ -60,11 +76,22 @@ var fProcessorFilters = flag.String("processor-filter", "", "filter the processors to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf --usage mysql'") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows var fService = flag.String("service", "", "operate on the service (windows only)") -var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)") -var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") -var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fServiceName = flag.String("service-name", "telegraf", + "service name (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", + "service display name (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fRunAsConsole = flag.Bool("console", false, + "run as console application (windows only)") var fPlugins = flag.String("plugin-directory", "", "path to directory containing external plugins") var fRunOnce = flag.Bool("once", false, "run one gather and exit") @@ -80,19 +107,25 @@ var stop chan struct{} func reloadLoop( inputFilters []string, outputFilters []string, - aggregatorFilters []string, - processorFilters []string, ) { reload := make(chan bool, 1) reload <- true for <-reload { reload <- false - ctx, cancel := context.WithCancel(context.Background()) signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGINT) + if *fWatchConfig != "" { + for _, fConfig := range fConfigs { + if _, err := os.Stat(fConfig); err == nil { + go watchLocalConfig(signals, fConfig) + } else { + log.Printf("W! 
Cannot watch config %s: %s", fConfig, err) + } + } + } go func() { select { case sig := <-signals: @@ -114,6 +147,46 @@ func reloadLoop( } } +func watchLocalConfig(signals chan os.Signal, fConfig string) { + var mytomb tomb.Tomb + var watcher watch.FileWatcher + if *fWatchConfig == "poll" { + watcher = watch.NewPollingFileWatcher(fConfig) + } else { + watcher = watch.NewInotifyFileWatcher(fConfig) + } + changes, err := watcher.ChangeEvents(&mytomb, 0) + if err != nil { + log.Printf("E! Error watching config: %s\n", err) + return + } + log.Println("I! Config watcher started") + select { + case <-changes.Modified: + log.Println("I! Config file modified") + case <-changes.Deleted: + // deleted can mean moved. wait a bit a check existence + <-time.After(time.Second) + if _, err := os.Stat(fConfig); err == nil { + log.Println("I! Config file overwritten") + } else { + log.Println("W! Config file deleted") + if err := watcher.BlockUntilExists(&mytomb); err != nil { + log.Printf("E! Cannot watch for config: %s\n", err.Error()) + return + } + log.Println("I! Config file appeared") + } + case <-changes.Truncated: + log.Println("I! Config file truncated") + case <-mytomb.Dying(): + log.Println("I! Config watcher ended") + return + } + mytomb.Done() + signals <- syscall.SIGHUP +} + func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, @@ -124,17 +197,28 @@ func runAgent(ctx context.Context, c := config.NewConfig() c.OutputFilters = outputFilters c.InputFilters = inputFilters - err := c.LoadConfig(*fConfig) - if err != nil { - return err + var err error + // providing no "config" flag should load default config + if len(fConfigs) == 0 { + err = c.LoadConfig("") + if err != nil { + return err + } + } + for _, fConfig := range fConfigs { + err = c.LoadConfig(fConfig) + if err != nil { + return err + } } - if *fConfigDirectory != "" { - err = c.LoadDirectory(*fConfigDirectory) + for _, fConfigDirectory := range fConfigDirs { + err = c.LoadDirectory(fConfigDirectory) if err != nil { return err } } + if !*fTest && len(c.Outputs) == 0 { return errors.New("Error: no outputs found, did you provide a valid config file?") } @@ -142,14 +226,12 @@ func runAgent(ctx context.Context, return errors.New("Error: no inputs found, did you provide a valid config file?") } - if int64(c.Agent.Interval.Duration) <= 0 { - return fmt.Errorf("Agent interval must be positive, found %s", - c.Agent.Interval.Duration) + if int64(c.Agent.Interval) <= 0 { + return fmt.Errorf("Agent interval must be positive, found %v", c.Agent.Interval) } - if int64(c.Agent.FlushInterval.Duration) <= 0 { - return fmt.Errorf("Agent flush_interval must be positive; found %s", - c.Agent.Interval.Duration) + if int64(c.Agent.FlushInterval) <= 0 { + return fmt.Errorf("Agent flush_interval must be positive; found %v", c.Agent.Interval) } ag, err := agent.NewAgent(c) @@ -158,14 +240,16 @@ func runAgent(ctx context.Context, } // Setup logging as configured. 
+ telegraf.Debug = ag.Config.Agent.Debug || *fDebug logConfig := logger.LogConfig{ - Debug: ag.Config.Agent.Debug || *fDebug, + Debug: telegraf.Debug, Quiet: ag.Config.Agent.Quiet || *fQuiet, LogTarget: ag.Config.Agent.LogTarget, Logfile: ag.Config.Agent.Logfile, RotationInterval: ag.Config.Agent.LogfileRotationInterval, RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize, RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives, + LogWithTimezone: ag.Config.Agent.LogWithTimezone, } logger.SetupLogging(logConfig) @@ -236,6 +320,9 @@ func formatFullVersion() string { } func main() { + flag.Var(&fConfigs, "config", "configuration file to load") + flag.Var(&fConfigDirs, "config-directory", "directory containing additional *.conf files") + flag.Usage = func() { usageExit(0) } flag.Parse() args := flag.Args() @@ -361,7 +448,5 @@ func main() { run( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go index ca28622f16752..21ad935b7147e 100644 --- a/cmd/telegraf/telegraf_posix.go +++ b/cmd/telegraf/telegraf_posix.go @@ -1,13 +1,12 @@ +//go:build !windows // +build !windows package main -func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func run(inputFilters, outputFilters []string) { stop = make(chan struct{}) reloadLoop( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 830e6eaa4f8a0..38222f2d0871d 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package main @@ -11,30 +12,27 @@ import ( "github.com/kardianos/service" ) -func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func run(inputFilters, outputFilters []string) { + // Register the eventlog logging target for windows. 
+ logger.RegisterEventLogger(*fServiceName) + if runtime.GOOS == "windows" && windowsRunAsService() { runAsWindowsService( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } else { stop = make(chan struct{}) reloadLoop( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } } type program struct { - inputFilters []string - outputFilters []string - aggregatorFilters []string - processorFilters []string + inputFilters []string + outputFilters []string } func (p *program) Start(s service.Service) error { @@ -46,8 +44,6 @@ func (p *program) run() { reloadLoop( p.inputFilters, p.outputFilters, - p.aggregatorFilters, - p.processorFilters, ) } func (p *program) Stop(s service.Service) error { @@ -55,7 +51,7 @@ func (p *program) Stop(s service.Service) error { return nil } -func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func runAsWindowsService(inputFilters, outputFilters []string) { programFiles := os.Getenv("ProgramFiles") if programFiles == "" { // Should never happen programFiles = "C:\\Program Files" @@ -69,10 +65,8 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process } prg := &program{ - inputFilters: inputFilters, - outputFilters: outputFilters, - aggregatorFilters: aggregatorFilters, - processorFilters: processorFilters, + inputFilters: inputFilters, + outputFilters: outputFilters, } s, err := service.New(prg, svcConfig) if err != nil { @@ -81,12 +75,17 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process // Handle the --service flag here to prevent any issues with tooling that // may not have an interactive session, e.g. installing from Ansible. if *fService != "" { - if *fConfig != "" { - svcConfig.Arguments = []string{"--config", *fConfig} + if len(fConfigs) > 0 { + svcConfig.Arguments = []string{} + } + for _, fConfig := range fConfigs { + svcConfig.Arguments = append(svcConfig.Arguments, "--config", fConfig) } - if *fConfigDirectory != "" { - svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory) + + for _, fConfigDirectory := range fConfigDirs { + svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", fConfigDirectory) } + //set servicename to service cmd line, to have a custom name after relaunch as a service svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName) @@ -96,12 +95,7 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process } os.Exit(0) } else { - winlogger, err := s.Logger(nil) - if err == nil { - //When in service mode, register eventlog target andd setup default logging to eventlog - logger.RegisterEventLogger(winlogger) - logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) - } + logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) err = s.Run() if err != nil { diff --git a/config/README.md b/config/README.md new file mode 120000 index 0000000000000..5455122d9fbb5 --- /dev/null +++ b/config/README.md @@ -0,0 +1 @@ +../docs/CONFIGURATION.md \ No newline at end of file diff --git a/config/aws/credentials.go b/config/aws/credentials.go index 1e4f91b132a3b..358080ab3ba69 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -1,52 +1,87 @@ package aws import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - 
"github.com/aws/aws-sdk-go/aws/session" + "context" + awsV2 "github.com/aws/aws-sdk-go-v2/aws" + configV2 "github.com/aws/aws-sdk-go-v2/config" + credentialsV2 "github.com/aws/aws-sdk-go-v2/credentials" + stscredsV2 "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/sts" ) type CredentialConfig struct { - Region string - AccessKey string - SecretKey string - RoleARN string - Profile string - Filename string - Token string - EndpointURL string + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + RoleSessionName string `toml:"role_session_name"` + WebIdentityTokenFile string `toml:"web_identity_token_file"` } -func (c *CredentialConfig) Credentials() client.ConfigProvider { +func (c *CredentialConfig) Credentials() (awsV2.Config, error) { if c.RoleARN != "" { return c.assumeCredentials() - } else { - return c.rootCredentials() } + return c.rootCredentials() } -func (c *CredentialConfig) rootCredentials() client.ConfigProvider { - config := &aws.Config{ - Region: aws.String(c.Region), - Endpoint: &c.EndpointURL, +func (c *CredentialConfig) rootCredentials() (awsV2.Config, error) { + options := []func(*configV2.LoadOptions) error{ + configV2.WithRegion(c.Region), + } + + if c.EndpointURL != "" { + resolver := awsV2.EndpointResolverFunc(func(service, region string) (awsV2.Endpoint, error) { + return awsV2.Endpoint{ + URL: c.EndpointURL, + HostnameImmutable: true, + Source: awsV2.EndpointSourceCustom, + }, nil + }) + options = append(options, configV2.WithEndpointResolver(resolver)) + } + + if c.Profile != "" { + options = append(options, configV2.WithSharedConfigProfile(c.Profile)) + } + if c.Filename != "" { + options = append(options, configV2.WithSharedCredentialsFiles([]string{c.Filename})) } + if c.AccessKey != "" || c.SecretKey != "" { - config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) - } else if c.Profile != "" || c.Filename != "" { - config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile) + provider := credentialsV2.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token) + options = append(options, configV2.WithCredentialsProvider(provider)) } - return session.New(config) + return configV2.LoadDefaultConfig(context.Background(), options...) 
} -func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { - rootCredentials := c.rootCredentials() - config := &aws.Config{ - Region: aws.String(c.Region), - Endpoint: &c.EndpointURL, +func (c *CredentialConfig) assumeCredentials() (awsV2.Config, error) { + rootCredentials, err := c.rootCredentials() + if err != nil { + return awsV2.Config{}, err } - config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) - return session.New(config) + + var provider awsV2.CredentialsProvider + stsService := sts.NewFromConfig(rootCredentials) + if c.WebIdentityTokenFile != "" { + provider = stscredsV2.NewWebIdentityRoleProvider(stsService, c.RoleARN, stscredsV2.IdentityTokenFile(c.WebIdentityTokenFile), func(opts *stscredsV2.WebIdentityRoleOptions) { + if c.RoleSessionName != "" { + opts.RoleSessionName = c.RoleSessionName + } + }) + } else { + provider = stscredsV2.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscredsV2.AssumeRoleOptions) { + if c.RoleSessionName != "" { + opts.RoleSessionName = c.RoleSessionName + } + }) + } + + rootCredentials.Credentials = awsV2.NewCredentialsCache(provider) + return rootCredentials, nil } diff --git a/config/config.go b/config/config.go index 4fd65139e2ab9..57cb9de479875 100644 --- a/config/config.go +++ b/config/config.go @@ -2,15 +2,14 @@ package config import ( "bytes" - "errors" "fmt" - "io/ioutil" + "io" "log" - "math" "net/http" "net/url" "os" "path/filepath" + "reflect" "regexp" "runtime" "sort" @@ -20,11 +19,13 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/json_v2" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/toml" @@ -50,12 +51,21 @@ var ( `"`, `\"`, `\`, `\\`, ) + httpLoadConfigRetryInterval = 10 * time.Second + + // fetchURLRe is a regex to determine whether the requested file should + // be fetched from a remote or read from the filesystem. + fetchURLRe = regexp.MustCompile(`^\w+://`) ) // Config specifies the URL/user/password for the database that telegraf // will be logging to, as well as all the plugins that the user has // specified type Config struct { + toml *toml.Config + errs []error // config load errors. + UnusedFields map[string]bool + Tags map[string]string InputFilters []string OutputFilters []string @@ -69,13 +79,18 @@ type Config struct { AggProcessors models.RunningProcessors } +// NewConfig creates a new struct to hold the Telegraf config. +// For historical reasons, It holds the actual instances of the running plugins +// once the configuration is parsed. 
func NewConfig() *Config { c := &Config{ + UnusedFields: map[string]bool{}, + // Agent defaults: Agent: &AgentConfig{ - Interval: internal.Duration{Duration: 10 * time.Second}, + Interval: Duration(10 * time.Second), RoundInterval: true, - FlushInterval: internal.Duration{Duration: 10 * time.Second}, + FlushInterval: Duration(10 * time.Second), LogTarget: "file", LogfileRotationMaxArchives: 5, }, @@ -88,12 +103,21 @@ func NewConfig() *Config { InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } + + tomlCfg := &toml.Config{ + NormFieldName: toml.DefaultConfig.NormFieldName, + FieldToKey: toml.DefaultConfig.FieldToKey, + MissingField: c.missingTomlField, + } + c.toml = tomlCfg + return c } +// AgentConfig defines configuration that will be used by the Telegraf agent type AgentConfig struct { // Interval at which to gather information - Interval internal.Duration + Interval Duration // RoundInterval rounds collection interval to 'interval'. // ie, if Interval=10s then always collect on :00, :10, :20, etc. @@ -105,22 +129,22 @@ type AgentConfig struct { // when interval = "250ms", precision will be "1ms" // Precision will NOT be used for service inputs. It is up to each individual // service input to set the timestamp at the appropriate precision. - Precision internal.Duration + Precision Duration // CollectionJitter is used to jitter the collection by a random amount. // Each plugin will sleep for a random time within jitter before collecting. // This can be used to avoid many plugins querying things like sysfs at the // same time, which can have a measurable effect on the system. - CollectionJitter internal.Duration + CollectionJitter Duration // FlushInterval is the Interval at which to flush data - FlushInterval internal.Duration + FlushInterval Duration // FlushJitter Jitters the flush interval by a random amount. // This is primarily to avoid large write spikes for users running a large // number of telegraf instances. // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - FlushJitter internal.Duration + FlushJitter Duration // MetricBatchSize is the maximum number of metrics that is wrote to an // output plugin in one call. @@ -160,16 +184,19 @@ type AgentConfig struct { // The file will be rotated after the time interval specified. When set // to 0 no time based rotation is performed. - LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"` + LogfileRotationInterval Duration `toml:"logfile_rotation_interval"` // The logfile will be rotated when it becomes larger than the specified // size. When set to 0 no size based rotation is performed. - LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"` + LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"` // Maximum number of rotated archives to keep, any older logs are deleted. // If set to -1, no archives are removed. LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"` + // Pick a timezone to use when logging or type 'local' for local time. + LogWithTimezone string `toml:"log_with_timezone"` + Hostname string OmitHostname bool } @@ -338,11 +365,14 @@ var agentConfig = ` ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. 
omit_hostname = false - ` var outputHeader = ` @@ -623,7 +653,7 @@ func PrintInputConfig(name string) error { if creator, ok := inputs.Inputs[name]; ok { printConfig(name, creator(), "inputs", false) } else { - return errors.New(fmt.Sprintf("Input %s not found", name)) + return fmt.Errorf("Input %s not found", name) } return nil } @@ -633,11 +663,12 @@ func PrintOutputConfig(name string) error { if creator, ok := outputs.Outputs[name]; ok { printConfig(name, creator(), "outputs", false) } else { - return errors.New(fmt.Sprintf("Output %s not found", name)) + return fmt.Errorf("Output %s not found", name) } return nil } +// LoadDirectory loads all toml config files found in the specified path, recursively. func (c *Config) LoadDirectory(path string) error { walkfn := func(thispath string, info os.FileInfo, _ error) error { if info == nil { @@ -683,6 +714,10 @@ func getDefaultConfigPath() (string, error) { etcfile = programFiles + `\Telegraf\telegraf.conf` } for _, path := range []string{envfile, homefile, etcfile} { + if isURL(path) { + log.Printf("I! Using config url: %s", path) + return path, nil + } if _, err := os.Stat(path); err == nil { log.Printf("I! Using config file: %s", path) return path, nil @@ -694,6 +729,12 @@ func getDefaultConfigPath() (string, error) { " in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile) } +// isURL checks if string is valid url +func isURL(str string) bool { + u, err := url.Parse(str) + return err == nil && u.Scheme != "" && u.Host != "" +} + // LoadConfig loads the given config file and applies it to c func (c *Config) LoadConfig(path string) error { var err error @@ -727,8 +768,8 @@ func (c *Config) LoadConfigData(data []byte) error { if !ok { return fmt.Errorf("invalid configuration, bad table name %q", tableName) } - if err = toml.UnmarshalTable(subTable, c.Tags); err != nil { - return fmt.Errorf("error parsing table name %q: %w", tableName, err) + if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil { + return fmt.Errorf("error parsing table name %q: %s", tableName, err) } } } @@ -739,8 +780,8 @@ func (c *Config) LoadConfigData(data []byte) error { if !ok { return fmt.Errorf("invalid configuration, error parsing agent table") } - if err = toml.UnmarshalTable(subTable, c.Agent); err != nil { - return fmt.Errorf("error parsing agent table: %w", err) + if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil { + return fmt.Errorf("error parsing [agent]: %w", err) } } @@ -757,6 +798,10 @@ func (c *Config) LoadConfigData(data []byte) error { c.Tags["host"] = c.Agent.Hostname } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields)) + } + // Parse all the rest of the plugins: for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) @@ -772,18 +817,21 @@ func (c *Config) LoadConfigData(data []byte) error { // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s array, %s", pluginName, err) + return fmt.Errorf("error parsing %s array, %w", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s", + return fmt.Errorf("unsupported config format: %s", pluginName) } + if 
len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "inputs", "plugins": for pluginName, pluginVal := range subTable.Fields { @@ -791,18 +839,21 @@ func (c *Config) LoadConfigData(data []byte) error { // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "processors": for pluginName, pluginVal := range subTable.Fields { @@ -810,13 +861,16 @@ func (c *Config) LoadConfigData(data []byte) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addProcessor(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "aggregators": for pluginName, pluginVal := range subTable.Fields { @@ -831,6 +885,9 @@ func (c *Config) LoadConfigData(data []byte) error { return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } // Assume it's an input input for legacy config file support if no other // identifiers are present @@ -861,19 +918,22 @@ func escapeEnv(value string) string { } func loadConfig(config string) ([]byte, error) { - u, err := url.Parse(config) - if err != nil { - return nil, err - } + if fetchURLRe.MatchString(config) { + u, err := url.Parse(config) + if err != nil { + return nil, err + } - switch u.Scheme { - case "https", "http": - return fetchConfig(u) - default: - // If it isn't a https scheme, try it as a file. 
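+		// Anything matching fetchURLRe (`^\w+://`) is parsed as a URL, but only
+		// http and https are fetched; a location like
+		// "ftp://example.com/telegraf.conf" (an illustrative URL) now errors
+		// instead of falling through to a local file read.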
+ switch u.Scheme { + case "https", "http": + return fetchConfig(u) + default: + return nil, fmt.Errorf("scheme %q not supported", u.Scheme) + } } - return ioutil.ReadFile(config) + // If it isn't a https scheme, try it as a file + return os.ReadFile(config) } func fetchConfig(u *url.URL) ([]byte, error) { @@ -887,17 +947,27 @@ func fetchConfig(u *url.URL) ([]byte, error) { } req.Header.Add("Accept", "application/toml") req.Header.Set("User-Agent", internal.ProductToken()) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status) + retries := 3 + for i := 0; i <= retries; i++ { + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("Retry %d of %d failed connecting to HTTP config server %s", i, retries, err) + } + + if resp.StatusCode != http.StatusOK { + if i < retries { + log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode) + time.Sleep(httpLoadConfigRetryInterval) + continue + } + return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) + } + defer resp.Body.Close() + return io.ReadAll(resp.Body) } - defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return nil, nil } // parseConfig loads a TOML configuration from a provided path and @@ -912,19 +982,19 @@ func parseConfig(contents []byte) (*ast.Table, error) { continue } - var env_var []byte + var envVar []byte if parameter[1] != nil { - env_var = parameter[1] + envVar = parameter[1] } else if parameter[2] != nil { - env_var = parameter[2] + envVar = parameter[2] } else { continue } - env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$")) + envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$")) if ok { - env_val = escapeEnv(env_val) - contents = bytes.Replace(contents, parameter[0], []byte(env_val), 1) + envVal = escapeEnv(envVal) + contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1) } } @@ -938,12 +1008,12 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { } aggregator := creator() - conf, err := buildAggregator(name, table) + conf, err := c.buildAggregator(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, aggregator); err != nil { + if err := c.toml.UnmarshalTable(table, aggregator); err != nil { return err } @@ -957,19 +1027,19 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { return fmt.Errorf("Undefined but requested processor: %s", name) } - processorConfig, err := buildProcessor(name, table) + processorConfig, err := c.buildProcessor(name, table) if err != nil { return err } - rf, err := c.newRunningProcessor(creator, processorConfig, name, table) + rf, err := c.newRunningProcessor(creator, processorConfig, table) if err != nil { return err } c.Processors = append(c.Processors, rf) // save a copy for the aggregator - rf, err = c.newRunningProcessor(creator, processorConfig, name, table) + rf, err = c.newRunningProcessor(creator, processorConfig, table) if err != nil { return err } @@ -981,17 +1051,16 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { func (c *Config) newRunningProcessor( creator processors.StreamingCreator, processorConfig *models.ProcessorConfig, - name string, table *ast.Table, ) (*models.RunningProcessor, error) { processor := creator() if p, ok := processor.(unwrappable); 
ok { - if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil { + if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil { return nil, err } } else { - if err := toml.UnmarshalTable(table, processor); err != nil { + if err := c.toml.UnmarshalTable(table, processor); err != nil { return nil, err } } @@ -1014,24 +1083,23 @@ func (c *Config) addOutput(name string, table *ast.Table) error { // arbitrary types of output, so build the serializer and set it. switch t := output.(type) { case serializers.SerializerOutput: - serializer, err := buildSerializer(name, table) + serializer, err := c.buildSerializer(table) if err != nil { return err } t.SetSerializer(serializer) } - outputConfig, err := buildOutput(name, table) + outputConfig, err := c.buildOutput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, output); err != nil { + if err := c.toml.UnmarshalTable(table, output); err != nil { return err } - ro := models.NewRunningOutput(name, output, outputConfig, - c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) + ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil } @@ -1054,7 +1122,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { // If the input has a SetParser function, then this means it can accept // arbitrary types of input, so build the parser and set it. if t, ok := input.(parsers.ParserInput); ok { - parser, err := buildParser(name, table) + parser, err := c.buildParser(name, table) if err != nil { return err } @@ -1062,7 +1130,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { } if t, ok := input.(parsers.ParserFuncInput); ok { - config, err := getParserConfig(name, table) + config, err := c.getParserConfig(name, table) if err != nil { return err } @@ -1071,12 +1139,12 @@ func (c *Config) addInput(name string, table *ast.Table) error { }) } - pluginConfig, err := buildInput(name, table) + pluginConfig, err := c.buildInput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, input); err != nil { + if err := c.toml.UnmarshalTable(table, input); err != nil { return err } @@ -1089,7 +1157,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { // buildAggregator parses Aggregator specific items from the ast.Table, // builds the filter and returns a // models.AggregatorConfig to be inserted into models.RunningAggregator -func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { +func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { conf := &models.AggregatorConfig{ Name: name, Delay: time.Millisecond * 100, @@ -1097,79 +1165,30 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err Grace: time.Second * 0, } - if err := getConfigDuration(tbl, "period", &conf.Period); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "delay", &conf.Delay); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "grace", &conf.Grace); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["drop_original"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - conf.DropOriginal, err = strconv.ParseBool(b.Value) - if err != nil { - return nil, fmt.Errorf("error parsing boolean value for %s: %s", name, err) - } - } - } - } - - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := 
node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.MeasurementPrefix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.MeasurementSuffix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.NameOverride = str.Value - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.Alias = str.Value - } - } - } + c.getFieldDuration(tbl, "period", &conf.Period) + c.getFieldDuration(tbl, "delay", &conf.Delay) + c.getFieldDuration(tbl, "grace", &conf.Grace) + c.getFieldBool(tbl, "drop_original", &conf.DropOriginal) + c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix) + c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix) + c.getFieldString(tbl, "name_override", &conf.NameOverride) + c.getFieldString(tbl, "alias", &conf.Alias) conf.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { - if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil { + if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil { return nil, fmt.Errorf("could not parse tags for input %s", name) } } } - delete(tbl.Fields, "drop_original") - delete(tbl.Fields, "name_prefix") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "tags") + if c.hasErrs() { + return nil, c.firstErr() + } + var err error - conf.Filter, err = buildFilter(tbl) + conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } @@ -1179,33 +1198,18 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err // buildProcessor parses Processor specific items from the ast.Table, // builds the filter and returns a // models.ProcessorConfig to be inserted into models.RunningProcessor -func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { +func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} - if node, ok := tbl.Fields["order"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Integer); ok { - var err error - conf.Order, err = strconv.ParseInt(b.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("error parsing int value for %s: %s", name, err) - } - } - } - } + c.getFieldInt64(tbl, "order", &conf.Order) + c.getFieldString(tbl, "alias", &conf.Alias) - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.Alias = str.Value - } - } + if c.hasErrs() { + return nil, c.firstErr() } - delete(tbl.Fields, "alias") - delete(tbl.Fields, "order") var err error - conf.Filter, err = buildFilter(tbl) + conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } @@ -1216,205 +1220,63 @@ func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // be inserted into the models.OutputConfig/models.InputConfig // to be used for glob filtering on tags and measurements -func buildFilter(tbl *ast.Table) (models.Filter, error) { +func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) { f := models.Filter{} - if 
node, ok := tbl.Fields["namepass"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.NamePass = append(f.NamePass, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["namedrop"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.NameDrop = append(f.NameDrop, str.Value) - } - } - } - } - } + c.getFieldStringSlice(tbl, "namepass", &f.NamePass) + c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop) - fields := []string{"pass", "fieldpass"} - for _, field := range fields { - if node, ok := tbl.Fields[field]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.FieldPass = append(f.FieldPass, str.Value) - } - } - } - } - } - } + c.getFieldStringSlice(tbl, "pass", &f.FieldPass) + c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass) - fields = []string{"drop", "fielddrop"} - for _, field := range fields { - if node, ok := tbl.Fields[field]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.FieldDrop = append(f.FieldDrop, str.Value) - } - } - } - } - } - } + c.getFieldStringSlice(tbl, "drop", &f.FieldDrop) + c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop) - if node, ok := tbl.Fields["tagpass"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &models.TagFilter{Name: name} - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - tagfilter.Filter = append(tagfilter.Filter, str.Value) - } - } - } - f.TagPass = append(f.TagPass, *tagfilter) - } - } - } - } + c.getFieldTagFilter(tbl, "tagpass", &f.TagPass) + c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop) - if node, ok := tbl.Fields["tagdrop"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &models.TagFilter{Name: name} - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - tagfilter.Filter = append(tagfilter.Filter, str.Value) - } - } - } - f.TagDrop = append(f.TagDrop, *tagfilter) - } - } - } - } + c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude) + c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude) - if node, ok := tbl.Fields["tagexclude"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.TagExclude = append(f.TagExclude, str.Value) - } - } - } - } + if c.hasErrs() { + return f, c.firstErr() } - if node, ok := tbl.Fields["taginclude"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.TagInclude = append(f.TagInclude, str.Value) - } - } - } - } - } if err := f.Compile(); err != nil { return f, err } - delete(tbl.Fields, "namedrop") - delete(tbl.Fields, "namepass") - delete(tbl.Fields, "fielddrop") - delete(tbl.Fields, "fieldpass") - delete(tbl.Fields, "drop") - delete(tbl.Fields, "pass") - 
delete(tbl.Fields, "tagdrop") - delete(tbl.Fields, "tagpass") - delete(tbl.Fields, "tagexclude") - delete(tbl.Fields, "taginclude") return f, nil } // buildInput parses input specific items from the ast.Table, // builds the filter and returns a // models.InputConfig to be inserted into models.RunningInput -func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { +func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { cp := &models.InputConfig{Name: name} - - if err := getConfigDuration(tbl, "interval", &cp.Interval); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "precision", &cp.Precision); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "collection_jitter", &cp.CollectionJitter); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.MeasurementPrefix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.MeasurementSuffix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.NameOverride = str.Value - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.Alias = str.Value - } - } - } + c.getFieldDuration(tbl, "interval", &cp.Interval) + c.getFieldDuration(tbl, "precision", &cp.Precision) + c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter) + c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix) + c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix) + c.getFieldString(tbl, "name_override", &cp.NameOverride) + c.getFieldString(tbl, "alias", &cp.Alias) cp.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { - if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil { - return nil, fmt.Errorf("could not parse tags for input %s\n", name) + if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil { + return nil, fmt.Errorf("could not parse tags for input %s", name) } } } - delete(tbl.Fields, "name_prefix") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "tags") + if c.hasErrs() { + return nil, c.firstErr() + } + var err error - cp.Filter, err = buildFilter(tbl) + cp.Filter, err = c.buildFilter(tbl) if err != nil { return cp, err } @@ -1424,796 +1286,463 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { // buildParser grabs the necessary entries from the ast.Table for creating // a parsers.Parser object, and creates it, which can then be added onto // an Input object. 
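+// Compared to the old free function, the method below also attaches a models
+// logger to the parser and, when the parser implements telegraf.Initializer,
+// calls Init() before the parser is handed to the input.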
-func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { - config, err := getParserConfig(name, tbl) +func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { + config, err := c.getParserConfig(name, tbl) + if err != nil { + return nil, err + } + parser, err := parsers.NewParser(config) if err != nil { return nil, err } - return parsers.NewParser(config) + logger := models.NewLogger("parsers", config.DataFormat, name) + models.SetLoggerOnPlugin(parser, logger) + if initializer, ok := parser.(telegraf.Initializer); ok { + if err := initializer.Init(); err != nil { + return nil, err + } + } + + return parser, nil } -func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { - c := &parsers.Config{ +func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { + pc := &parsers.Config{ JSONStrict: true, } - if node, ok := tbl.Fields["data_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataFormat = str.Value - } - } - } + c.getFieldString(tbl, "data_format", &pc.DataFormat) // Legacy support, exec plugin originally parsed JSON by default. - if name == "exec" && c.DataFormat == "" { - c.DataFormat = "json" - } else if c.DataFormat == "" { - c.DataFormat = "influx" - } + if name == "exec" && pc.DataFormat == "" { + pc.DataFormat = "json" + } else if pc.DataFormat == "" { + pc.DataFormat = "influx" + } + + c.getFieldString(tbl, "separator", &pc.Separator) + + c.getFieldStringSlice(tbl, "templates", &pc.Templates) + c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys) + c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields) + c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey) + c.getFieldString(tbl, "json_query", &pc.JSONQuery) + c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey) + c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat) + c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone) + c.getFieldBool(tbl, "json_strict", &pc.JSONStrict) + c.getFieldString(tbl, "data_type", &pc.DataType) + c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile) + c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel) + c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit) + + c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB) + + c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath) + c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath) + c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat) + c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath) + c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap) - if node, ok := tbl.Fields["separator"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Separator = str.Value - } - } - } + //for grok data_format + c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns) + c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns) + c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns) + c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles) + c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone) + c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp) - if node, ok := tbl.Fields["templates"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, 
elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.Templates = append(c.Templates, str.Value) - } + //for csv parser + c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames) + c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes) + c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns) + c.getFieldString(tbl, "csv_timezone", &pc.CSVTimezone) + c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter) + c.getFieldString(tbl, "csv_comment", &pc.CSVComment) + c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn) + c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn) + c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat) + c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount) + c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows) + c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns) + c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) + c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues) + + c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) + + c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName) + + //for XPath parser family + if choice.Contains(pc.DataFormat, []string{"xml", "xpath_json", "xpath_msgpack", "xpath_protobuf"}) { + c.getFieldString(tbl, "xpath_protobuf_file", &pc.XPathProtobufFile) + c.getFieldString(tbl, "xpath_protobuf_type", &pc.XPathProtobufType) + c.getFieldBool(tbl, "xpath_print_document", &pc.XPathPrintDocument) + + // Determine the actual xpath configuration tables + node, xpathOK := tbl.Fields["xpath"] + if !xpathOK { + // Add this for backward compatibility + node, xpathOK = tbl.Fields[pc.DataFormat] + } + if xpathOK { + if subtbls, ok := node.([]*ast.Table); ok { + pc.XPathConfig = make([]parsers.XPathConfig, len(subtbls)) + for i, subtbl := range subtbls { + subcfg := pc.XPathConfig[i] + c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery) + c.getFieldString(subtbl, "metric_selection", &subcfg.Selection) + c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp) + c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt) + c.getFieldStringMap(subtbl, "tags", &subcfg.Tags) + c.getFieldStringMap(subtbl, "fields", &subcfg.Fields) + c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt) + c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection) + c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand) + c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery) + c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery) + pc.XPathConfig[i] = subcfg } } } } - if node, ok := tbl.Fields["tag_keys"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.TagKeys = append(c.TagKeys, str.Value) + //for JSONPath parser + if node, ok := tbl.Fields["json_v2"]; ok { + if metricConfigs, ok := node.([]*ast.Table); ok { + pc.JSONV2Config = make([]parsers.JSONV2Config, len(metricConfigs)) + for i, metricConfig := range metricConfigs { + mc := pc.JSONV2Config[i] + c.getFieldString(metricConfig, "measurement_name", &mc.MeasurementName) + if mc.MeasurementName == "" { + mc.MeasurementName = name + } + c.getFieldString(metricConfig, "measurement_name_path", &mc.MeasurementNamePath) + c.getFieldString(metricConfig, "timestamp_path", &mc.TimestampPath) + c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat) + 
c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone) + + if fieldConfigs, ok := metricConfig.Fields["field"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var f json_v2.DataSet + c.getFieldString(fieldconfig, "path", &f.Path) + c.getFieldString(fieldconfig, "rename", &f.Rename) + c.getFieldString(fieldconfig, "type", &f.Type) + mc.Fields = append(mc.Fields, f) + } + } + } + if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var t json_v2.DataSet + c.getFieldString(fieldconfig, "path", &t.Path) + c.getFieldString(fieldconfig, "rename", &t.Rename) + t.Type = "string" + mc.Tags = append(mc.Tags, t) + } } } - } - } - } - if node, ok := tbl.Fields["json_string_fields"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.JSONStringFields = append(c.JSONStringFields, str.Value) + if objectconfigs, ok := metricConfig.Fields["object"]; ok { + if objectconfigs, ok := objectconfigs.([]*ast.Table); ok { + for _, objectConfig := range objectconfigs { + var o json_v2.JSONObject + c.getFieldString(objectConfig, "path", &o.Path) + c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey) + c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat) + c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone) + c.getFieldBool(objectConfig, "disable_prepend_keys", &o.DisablePrependKeys) + c.getFieldStringSlice(objectConfig, "included_keys", &o.IncludedKeys) + c.getFieldStringSlice(objectConfig, "excluded_keys", &o.ExcludedKeys) + c.getFieldStringSlice(objectConfig, "tags", &o.Tags) + c.getFieldStringMap(objectConfig, "renames", &o.Renames) + c.getFieldStringMap(objectConfig, "fields", &o.Fields) + mc.JSONObjects = append(mc.JSONObjects, o) + } } } - } - } - } - if node, ok := tbl.Fields["json_name_key"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONNameKey = str.Value + pc.JSONV2Config[i] = mc } } } - if node, ok := tbl.Fields["json_query"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONQuery = str.Value - } - } + pc.MetricName = name + + if c.hasErrs() { + return nil, c.firstErr() } - if node, ok := tbl.Fields["json_time_key"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimeKey = str.Value - } - } - } - - if node, ok := tbl.Fields["json_time_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimeFormat = str.Value - } - } - } - - if node, ok := tbl.Fields["json_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimezone = str.Value - } - } - } - - if node, ok := tbl.Fields["json_strict"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.JSONStrict, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["data_type"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataType = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_auth_file"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := 
kv.Value.(*ast.String); ok { - c.CollectdAuthFile = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_security_level"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CollectdSecurityLevel = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CollectdSplit = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_typesdb"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardMetricRegistryPath = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_time_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTimePath = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_time_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTimeFormat = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_tags_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTagsPath = str.Value - } - } - } - c.DropwizardTagPathsMap = make(map[string]string) - if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTagPathsMap[name] = str.Value - } - } - } - } - } - - //for grok data_format - if node, ok := tbl.Fields["grok_named_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokPatterns = append(c.GrokPatterns, str.Value) - } - } - } - } - } + return pc, nil +} - if node, ok := tbl.Fields["grok_custom_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokCustomPatterns = str.Value - } - } - } +// buildSerializer grabs the necessary entries from the ast.Table for creating +// a serializers.Serializer object, and creates it, which can then be added onto +// an Output object. 
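+//
+// For example, an output table such as (an illustrative snippet, not taken
+// from this diff)
+//
+//	[[outputs.file]]
+//	  data_format = "json"
+//	  json_timestamp_units = "1ms"
+//
+// is resolved here into a serializers.Config, with data_format defaulting to
+// "influx" when unset.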
+func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) { + sc := &serializers.Config{TimestampUnits: 1 * time.Second} - if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value) - } - } - } - } - } + c.getFieldString(tbl, "data_format", &sc.DataFormat) - if node, ok := tbl.Fields["grok_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokTimezone = str.Value - } - } + if sc.DataFormat == "" { + sc.DataFormat = "influx" } - if node, ok := tbl.Fields["grok_unique_timestamp"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokUniqueTimestamp = str.Value - } - } - } + c.getFieldString(tbl, "prefix", &sc.Prefix) + c.getFieldString(tbl, "template", &sc.Template) + c.getFieldStringSlice(tbl, "templates", &sc.Templates) + c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format) + c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar) + c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes) - //for csv parser - if node, ok := tbl.Fields["csv_column_names"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVColumnNames = append(c.CSVColumnNames, str.Value) - } - } - } - } - } + c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields) + c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport) + c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport) + c.getFieldString(tbl, "graphite_tag_sanitize_mode", &sc.GraphiteTagSanitizeMode) - if node, ok := tbl.Fields["csv_column_types"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value) - } - } - } - } - } + c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) - if node, ok := tbl.Fields["csv_tag_columns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVTagColumns = append(c.CSVTagColumns, str.Value) - } - } - } - } - } + c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) - if node, ok := tbl.Fields["csv_delimiter"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVDelimiter = str.Value - } - } - } + c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting) + c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric) - if node, ok := tbl.Fields["csv_comment"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVComment = str.Value - } - } - } + c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride) + c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict) - if node, ok := tbl.Fields["csv_measurement_column"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVMeasurementColumn = str.Value - } - } - } + c.getFieldBool(tbl, "prometheus_export_timestamp", 
&sc.PrometheusExportTimestamp) + c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics) + c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel) - if node, ok := tbl.Fields["csv_timestamp_column"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimestampColumn = str.Value - } - } + if c.hasErrs() { + return nil, c.firstErr() } - if node, ok := tbl.Fields["csv_timestamp_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimestampFormat = str.Value - } - } - } + return serializers.NewSerializer(sc) +} - if node, ok := tbl.Fields["csv_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimezone = str.Value - } - } +// buildOutput parses output specific items from the ast.Table, +// builds the filter and returns an +// models.OutputConfig to be inserted into models.RunningInput +// Note: error exists in the return for future calls that might require error +func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { + filter, err := c.buildFilter(tbl) + if err != nil { + return nil, err } - - if node, ok := tbl.Fields["csv_header_row_count"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVHeaderRowCount = int(v) - } - } + oc := &models.OutputConfig{ + Name: name, + Filter: filter, } - if node, ok := tbl.Fields["csv_skip_rows"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVSkipRows = int(v) - } - } - } + // TODO: support FieldPass/FieldDrop on outputs - if node, ok := tbl.Fields["csv_skip_columns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVSkipColumns = int(v) - } - } - } + c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval) + c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter) - if node, ok := tbl.Fields["csv_trim_space"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.Boolean); ok { - //for config with no quotes - val, err := strconv.ParseBool(str.Value) - c.CSVTrimSpace = val - if err != nil { - return nil, fmt.Errorf("E! 
parsing to bool: %v", err) - } - } - } - } + c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit) + c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize) + c.getFieldString(tbl, "alias", &oc.Alias) + c.getFieldString(tbl, "name_override", &oc.NameOverride) + c.getFieldString(tbl, "name_suffix", &oc.NameSuffix) + c.getFieldString(tbl, "name_prefix", &oc.NamePrefix) - if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value) - } - } - } - } + if c.hasErrs() { + return nil, c.firstErr() } - c.MetricName = name - - delete(tbl.Fields, "data_format") - delete(tbl.Fields, "separator") - delete(tbl.Fields, "templates") - delete(tbl.Fields, "tag_keys") - delete(tbl.Fields, "json_name_key") - delete(tbl.Fields, "json_query") - delete(tbl.Fields, "json_string_fields") - delete(tbl.Fields, "json_time_format") - delete(tbl.Fields, "json_time_key") - delete(tbl.Fields, "json_timezone") - delete(tbl.Fields, "json_strict") - delete(tbl.Fields, "data_type") - delete(tbl.Fields, "collectd_auth_file") - delete(tbl.Fields, "collectd_security_level") - delete(tbl.Fields, "collectd_typesdb") - delete(tbl.Fields, "collectd_parse_multivalue") - delete(tbl.Fields, "dropwizard_metric_registry_path") - delete(tbl.Fields, "dropwizard_time_path") - delete(tbl.Fields, "dropwizard_time_format") - delete(tbl.Fields, "dropwizard_tags_path") - delete(tbl.Fields, "dropwizard_tag_paths") - delete(tbl.Fields, "grok_named_patterns") - delete(tbl.Fields, "grok_patterns") - delete(tbl.Fields, "grok_custom_patterns") - delete(tbl.Fields, "grok_custom_pattern_files") - delete(tbl.Fields, "grok_timezone") - delete(tbl.Fields, "grok_unique_timestamp") - delete(tbl.Fields, "csv_column_names") - delete(tbl.Fields, "csv_column_types") - delete(tbl.Fields, "csv_comment") - delete(tbl.Fields, "csv_delimiter") - delete(tbl.Fields, "csv_field_columns") - delete(tbl.Fields, "csv_header_row_count") - delete(tbl.Fields, "csv_measurement_column") - delete(tbl.Fields, "csv_skip_columns") - delete(tbl.Fields, "csv_skip_rows") - delete(tbl.Fields, "csv_tag_columns") - delete(tbl.Fields, "csv_timestamp_column") - delete(tbl.Fields, "csv_timestamp_format") - delete(tbl.Fields, "csv_timezone") - delete(tbl.Fields, "csv_trim_space") - delete(tbl.Fields, "form_urlencoded_tag_keys") - - return c, nil + return oc, nil } -// buildSerializer grabs the necessary entries from the ast.Table for creating -// a serializers.Serializer object, and creates it, which can then be added onto -// an Output object. 
-func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { - c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} - - if node, ok := tbl.Fields["data_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataFormat = str.Value - } - } - } - - if c.DataFormat == "" { - c.DataFormat = "influx" - } - - if node, ok := tbl.Fields["prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Prefix = str.Value - } - } +func (c *Config) missingTomlField(_ reflect.Type, key string) error { + switch key { + case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file", + "collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter", + "csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", + "csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns", + "csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values", + "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", + "dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path", + "fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys", + "grace", "graphite_separator", "graphite_tag_sanitize_mode", "graphite_tag_support", + "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", + "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", + "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", + "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "json_v2", + "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", + "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", + "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", + "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", + "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", + "value_field_name", "wavefront_source_override", "wavefront_use_strict", + "xml", "xpath", "xpath_json", "xpath_msgpack", "xpath_protobuf", "xpath_print_document", + "xpath_protobuf_file", "xpath_protobuf_type": + + // ignore fields that are common to all plugins. 
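+		// Any other key is recorded in UnusedFields so that LoadConfigData can
+		// reject the enclosing table with a "fields ... weren't used" error
+		// instead of silently dropping the setting.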
+ default: + c.UnusedFields[key] = true } + return nil +} - if node, ok := tbl.Fields["template"]; ok { +func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { - c.Template = str.Value - } - } - } - - if node, ok := tbl.Fields["templates"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.Templates = append(c.Templates, str.Value) - } - } + *target = str.Value } } } +} - if node, ok := tbl.Fields["carbon2_format"]; ok { +func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { - c.Carbon2Format = str.Value - } - } - } - - if node, ok := tbl.Fields["influx_max_line_bytes"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.InfluxMaxLineBytes = int(v) - } - } - } - - if node, ok := tbl.Fields["influx_sort_fields"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.InfluxSortFields, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["influx_uint_support"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.InfluxUintSupport, err = b.Boolean() + d, err := time.ParseDuration(str.Value) if err != nil { - return nil, err + c.addError(tbl, fmt.Errorf("error parsing duration: %w", err)) + return } + targetVal := reflect.ValueOf(target).Elem() + targetVal.Set(reflect.ValueOf(d)) } } } +} - if node, ok := tbl.Fields["graphite_tag_support"]; ok { +func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) { + var err error + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.GraphiteTagSupport, err = b.Boolean() + switch t := kv.Value.(type) { + case *ast.Boolean: + *target, err = t.Boolean() if err != nil { - return nil, err + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) + return } - } - } - } - - if node, ok := tbl.Fields["graphite_separator"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GraphiteSeparator = str.Value - } - } - } - - if node, ok := tbl.Fields["json_timestamp_units"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - timestampVal, err := time.ParseDuration(str.Value) + case *ast.String: + *target, err = strconv.ParseBool(t.Value) if err != nil { - return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err) + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) + return } - // now that we have a duration, truncate it to the nearest - // power of ten (just in case) - nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds()))) - new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent))) - c.TimestampUnits = time.Duration(new_nanoseconds) + default: + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", 
kv.Value.Source())) + return } } } +} - if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok { +func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.HecRouting, err = b.Boolean() + if iAst, ok := kv.Value.(*ast.Integer); ok { + i, err := iAst.Int() if err != nil { - return nil, err + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return } + *target = int(i) } } } +} - if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok { +func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.SplunkmetricMultiMetric, err = b.Boolean() + if iAst, ok := kv.Value.(*ast.Integer); ok { + i, err := iAst.Int() if err != nil { - return nil, err + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return } + *target = i } } } +} - if node, ok := tbl.Fields["wavefront_source_override"]; ok { +func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { - c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value) + *target = append(*target, str.Value) } } + } else { + c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName)) + return } } } - - if node, ok := tbl.Fields["wavefront_use_strict"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.WavefrontUseStrict, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusExportTimestamp, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusSortMetrics, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_string_as_label"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusStringAsLabel, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - delete(tbl.Fields, "carbon2_format") - delete(tbl.Fields, "influx_max_line_bytes") - delete(tbl.Fields, "influx_sort_fields") - delete(tbl.Fields, "influx_uint_support") - delete(tbl.Fields, "graphite_tag_support") - delete(tbl.Fields, "graphite_separator") - delete(tbl.Fields, "data_format") - delete(tbl.Fields, "prefix") - delete(tbl.Fields, "template") - delete(tbl.Fields, "templates") - delete(tbl.Fields, "json_timestamp_units") - delete(tbl.Fields, "splunkmetric_hec_routing") - delete(tbl.Fields, "splunkmetric_multimetric") - delete(tbl.Fields, "wavefront_source_override") - delete(tbl.Fields, "wavefront_use_strict") - delete(tbl.Fields, "prometheus_export_timestamp") - delete(tbl.Fields, "prometheus_sort_metrics") - 
delete(tbl.Fields, "prometheus_string_as_label") - return serializers.NewSerializer(c) } -// buildOutput parses output specific items from the ast.Table, -// builds the filter and returns an -// models.OutputConfig to be inserted into models.RunningInput -// Note: error exists in the return for future calls that might require error -func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { - filter, err := buildFilter(tbl) - if err != nil { - return nil, err - } - oc := &models.OutputConfig{ - Name: name, - Filter: filter, - } - - // TODO - // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass - if len(oc.Filter.FieldDrop) > 0 { - oc.Filter.NameDrop = oc.Filter.FieldDrop - } - if len(oc.Filter.FieldPass) > 0 { - oc.Filter.NamePass = oc.Filter.FieldPass - } - - if err := getConfigDuration(tbl, "flush_interval", &oc.FlushInterval); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "flush_jitter", &oc.FlushJitter); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["metric_buffer_limit"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err +func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) { + if node, ok := tbl.Fields[fieldName]; ok { + if subtbl, ok := node.(*ast.Table); ok { + for name, val := range subtbl.Fields { + if kv, ok := val.(*ast.KeyValue); ok { + tagfilter := models.TagFilter{Name: name} + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + tagfilter.Filter = append(tagfilter.Filter, str.Value) + } + } + } else { + c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName)) + return + } + *target = append(*target, tagfilter) } - oc.MetricBufferLimit = int(v) } } } +} - if node, ok := tbl.Fields["metric_batch_size"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err +func (c *Config) getFieldStringMap(tbl *ast.Table, fieldName string, target *map[string]string) { + *target = map[string]string{} + if node, ok := tbl.Fields[fieldName]; ok { + if subtbl, ok := node.(*ast.Table); ok { + for name, val := range subtbl.Fields { + if kv, ok := val.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + (*target)[name] = str.Value + } } - oc.MetricBatchSize = int(v) - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.Alias = str.Value } } } +} - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NameOverride = str.Value - } - } +func keys(m map[string]bool) []string { + result := []string{} + for k := range m { + result = append(result, k) } + return result +} - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NameSuffix = str.Value - } - } - } +func (c *Config) hasErrs() bool { + return len(c.errs) > 0 +} - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NamePrefix = str.Value - } - } +func (c *Config) firstErr() error { + if len(c.errs) == 0 
{ + return nil } + return c.errs[0] +} - delete(tbl.Fields, "metric_buffer_limit") - delete(tbl.Fields, "metric_batch_size") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_prefix") - - return oc, nil +func (c *Config) addError(tbl *ast.Table, err error) { + c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err)) } // unwrappable lets you retrieve the original telegraf.Processor from the @@ -2222,19 +1751,3 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { type unwrappable interface { Unwrap() telegraf.Processor } - -func getConfigDuration(tbl *ast.Table, key string, target *time.Duration) error { - if node, ok := tbl.Fields[key]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - d, err := time.ParseDuration(str.Value) - if err != nil { - return err - } - delete(tbl.Fields, key) - *target = d - } - } - } - return nil -} diff --git a/config/config_test.go b/config/config_test.go index 6c5e3662a3151..940b84ada7773 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,33 +1,32 @@ package config import ( + "fmt" + "net/http" + "net/http/httptest" "os" + "runtime" + "strings" "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/exec" - "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" - "github.com/influxdata/telegraf/plugins/inputs/memcached" - "github.com/influxdata/telegraf/plugins/inputs/procstat" - httpOut "github.com/influxdata/telegraf/plugins/outputs/http" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { c := NewConfig() - err := os.Setenv("MY_TEST_SERVER", "192.168.1.1") - assert.NoError(t, err) - err = os.Setenv("TEST_INTERVAL", "10s") - assert.NoError(t, err) + require.NoError(t, os.Setenv("MY_TEST_SERVER", "192.168.1.1")) + require.NoError(t, os.Setenv("TEST_INTERVAL", "10s")) c.LoadConfig("./testdata/single_plugin_env_vars.toml") - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"192.168.1.1"} + input := inputs.Inputs["memcached"]().(*MockupInputPlugin) + input.Servers = []string{"192.168.1.1"} filter := models.Filter{ NameDrop: []string{"metricname2"}, @@ -47,26 +46,27 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filter.Compile()) + inputConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 10 * time.Second, } - mConfig.Tags = make(map[string]string) + inputConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + // Ignore Log and Parser + c.Inputs[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct mockup struct.") + require.Equal(t, inputConfig, 
c.Inputs[0].Config, "Testdata did not produce correct input metadata.") } func TestConfig_LoadSingleInput(t *testing.T) { c := NewConfig() c.LoadConfig("./testdata/single_plugin.toml") - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"localhost"} + input := inputs.Inputs["memcached"]().(*MockupInputPlugin) + input.Servers = []string{"localhost"} filter := models.Filter{ NameDrop: []string{"metricname2"}, @@ -86,35 +86,34 @@ func TestConfig_LoadSingleInput(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filter.Compile()) + inputConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 5 * time.Second, } - mConfig.Tags = make(map[string]string) + inputConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + // Ignore Log and Parser + c.Inputs[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") + require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") } func TestConfig_LoadDirectory(t *testing.T) { c := NewConfig() - err := c.LoadConfig("./testdata/single_plugin.toml") - if err != nil { - t.Error(err) - } - err = c.LoadDirectory("./testdata/subconfig") - if err != nil { - t.Error(err) - } + require.NoError(t, c.LoadConfig("./testdata/single_plugin.toml")) + require.NoError(t, c.LoadDirectory("./testdata/subconfig")) - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"localhost"} + // Create the expected data + expectedPlugins := make([]*MockupInputPlugin, 4) + expectedConfigs := make([]*models.InputConfig, 4) - filter := models.Filter{ + expectedPlugins[0] = inputs.Inputs["memcached"]().(*MockupInputPlugin) + expectedPlugins[0].Servers = []string{"localhost"} + + filterMockup := models.Filter{ NameDrop: []string{"metricname2"}, NamePass: []string{"metricname1"}, FieldDrop: []string{"other", "stuff"}, @@ -132,121 +131,138 @@ func TestConfig_LoadDirectory(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filterMockup.Compile()) + expectedConfigs[0] = &models.InputConfig{ Name: "memcached", - Filter: filter, + Filter: filterMockup, Interval: 5 * time.Second, } - mConfig.Tags = make(map[string]string) - - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + expectedConfigs[0].Tags = make(map[string]string) - ex := inputs.Inputs["exec"]().(*exec.Exec) + expectedPlugins[1] = inputs.Inputs["exec"]().(*MockupInputPlugin) p, err := parsers.NewParser(&parsers.Config{ MetricName: "exec", DataFormat: "json", JSONStrict: true, }) - assert.NoError(t, err) - ex.SetParser(p) - ex.Command = "/usr/bin/myothercollector --foo=bar" - eConfig := &models.InputConfig{ + require.NoError(t, err) + expectedPlugins[1].SetParser(p) + expectedPlugins[1].Command = "/usr/bin/myothercollector --foo=bar" + expectedConfigs[1] = &models.InputConfig{ Name: "exec", MeasurementSuffix: "_myothercollector", } - eConfig.Tags = make(map[string]string) - - 
exec := c.Inputs[1].Input.(*exec.Exec) - require.NotNil(t, exec.Log) - exec.Log = nil - - assert.Equal(t, ex, c.Inputs[1].Input, - "Merged Testdata did not produce a correct exec struct.") - assert.Equal(t, eConfig, c.Inputs[1].Config, - "Merged Testdata did not produce correct exec metadata.") + expectedConfigs[1].Tags = make(map[string]string) - memcached.Servers = []string{"192.168.1.1"} - assert.Equal(t, memcached, c.Inputs[2].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[2].Config, - "Testdata did not produce correct memcached metadata.") + expectedPlugins[2] = inputs.Inputs["memcached"]().(*MockupInputPlugin) + expectedPlugins[2].Servers = []string{"192.168.1.1"} - pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) - pstat.PidFile = "/var/run/grafana-server.pid" - - pConfig := &models.InputConfig{Name: "procstat"} - pConfig.Tags = make(map[string]string) - - assert.Equal(t, pstat, c.Inputs[3].Input, - "Merged Testdata did not produce a correct procstat struct.") - assert.Equal(t, pConfig, c.Inputs[3].Config, - "Merged Testdata did not produce correct procstat metadata.") + filterMemcached := models.Filter{ + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, + TagDrop: []models.TagFilter{ + { + Name: "badtag", + Filter: []string{"othertag"}, + }, + }, + TagPass: []models.TagFilter{ + { + Name: "goodtag", + Filter: []string{"mytag"}, + }, + }, + } + require.NoError(t, filterMemcached.Compile()) + expectedConfigs[2] = &models.InputConfig{ + Name: "memcached", + Filter: filterMemcached, + Interval: 5 * time.Second, + } + expectedConfigs[2].Tags = make(map[string]string) + + expectedPlugins[3] = inputs.Inputs["procstat"]().(*MockupInputPlugin) + expectedPlugins[3].PidFile = "/var/run/grafana-server.pid" + expectedConfigs[3] = &models.InputConfig{Name: "procstat"} + expectedConfigs[3].Tags = make(map[string]string) + + // Check the generated plugins + require.Len(t, c.Inputs, len(expectedPlugins)) + require.Len(t, c.Inputs, len(expectedConfigs)) + for i, plugin := range c.Inputs { + input := plugin.Input.(*MockupInputPlugin) + // Check the logger and ignore it for comparison + require.NotNil(t, input.Log) + input.Log = nil + + // Ignore the parser if not expected + if expectedPlugins[i].parser == nil { + input.parser = nil + } + + require.Equalf(t, expectedPlugins[i], plugin.Input, "Plugin %d: incorrect struct produced", i) + require.Equalf(t, expectedConfigs[i], plugin.Config, "Plugin %d: incorrect config produced", i) + } } func TestConfig_LoadSpecialTypes(t *testing.T) { c := NewConfig() - err := c.LoadConfig("./testdata/special_types.toml") - assert.NoError(t, err) - require.Equal(t, 1, len(c.Inputs)) + require.NoError(t, c.LoadConfig("./testdata/special_types.toml")) + require.Len(t, c.Inputs, 1) - inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2) - assert.Equal(t, true, ok) + input, ok := c.Inputs[0].Input.(*MockupInputPlugin) + require.True(t, ok) // Tests telegraf duration parsing. - assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout) + require.Equal(t, Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. - assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize) + require.Equal(t, Size(1024*1024), input.MaxBodySize) // Tests toml multiline basic strings. 
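 // Note (editor's sketch of assumed context; the `tls_cert` key itself is
 // not shown in this hunk): a TOML multiline basic string such as
 //   tls_cert = """
 //   /path/to/my/cert
 //   """
 // keeps the newline that precedes the closing delimiter, so the parsed
 // value ends in "\n" (or "\r\n" on a Windows checkout). Trimming both
 // endings below keeps the assertion platform-independent.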
- assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert) + require.Equal(t, "/path/to/my/cert", strings.TrimRight(input.TLSCert, "\r\n")) } func TestConfig_FieldNotDefined(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") - assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) - + require.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) } func TestConfig_WrongFieldType(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) c = NewConfig() err = c.LoadConfig("./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { // #4098 c := NewConfig() - err := c.LoadConfig("./testdata/inline_table.toml") - assert.NoError(t, err) - require.Equal(t, 2, len(c.Outputs)) - - outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP) - assert.Equal(t, true, ok) - assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers) - assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) + require.NoError(t, c.LoadConfig("./testdata/inline_table.toml")) + require.Len(t, c.Outputs, 2) + + output, ok := c.Outputs[1].Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, output.Headers) + require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) } func TestConfig_SliceComment(t *testing.T) { t.Skipf("Skipping until #3642 is resolved") c := NewConfig() - err := c.LoadConfig("./testdata/slice_comment.toml") - assert.NoError(t, err) - require.Equal(t, 1, len(c.Outputs)) + require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml")) + require.Len(t, c.Outputs, 1) - outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP) - assert.Equal(t, []string{"test"}, outputHTTP.Scopes) - assert.Equal(t, true, ok) + output, ok := c.Outputs[0].Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, []string{"test"}, output.Scopes) } func TestConfig_BadOrdering(t *testing.T) { @@ -255,5 +271,135 @@ func TestConfig_BadOrdering(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") - 
assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) + require.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) +} + +func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { + // #8256 Cannot use empty string as the namespace prefix + c := NewConfig() + require.NoError(t, c.LoadConfig("./testdata/azure_monitor.toml")) + require.Len(t, c.Outputs, 2) + + expectedPrefix := []string{"Telegraf/", ""} + for i, plugin := range c.Outputs { + output, ok := plugin.Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, expectedPrefix[i], output.NamespacePrefix) + } +} + +func TestConfig_URLRetries3Fails(t *testing.T) { + httpLoadConfigRetryInterval = 0 * time.Second + responseCounter := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + responseCounter++ + })) + defer ts.Close() + + expected := fmt.Sprintf("Error loading config file %s: Retry 3 of 3 failed to retrieve remote config: 404 Not Found", ts.URL) + + c := NewConfig() + err := c.LoadConfig(ts.URL) + require.Error(t, err) + require.Equal(t, expected, err.Error()) + require.Equal(t, 4, responseCounter) +} + +func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { + httpLoadConfigRetryInterval = 0 * time.Second + responseCounter := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if responseCounter <= 2 { + w.WriteHeader(http.StatusNotFound) + } else { + w.WriteHeader(http.StatusOK) + } + responseCounter++ + })) + defer ts.Close() + + c := NewConfig() + require.NoError(t, c.LoadConfig(ts.URL)) + require.Equal(t, 4, responseCounter) +} + +func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + c := NewConfig() + err := os.Setenv("TELEGRAF_CONFIG_PATH", ts.URL) + require.NoError(t, err) + configPath, err := getDefaultConfigPath() + require.NoError(t, err) + require.Equal(t, ts.URL, configPath) + err = c.LoadConfig("") + require.NoError(t, err) +} + +func TestConfig_URLLikeFileName(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("http:##www.example.com.conf") + require.Error(t, err) + + if runtime.GOOS == "windows" { + // The error file not found error message is different on windows + require.Equal(t, "Error loading config file http:##www.example.com.conf: open http:##www.example.com.conf: The system cannot find the file specified.", err.Error()) + } else { + require.Equal(t, "Error loading config file http:##www.example.com.conf: open http:##www.example.com.conf: no such file or directory", err.Error()) + } +} + +/*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ +type MockupInputPlugin struct { + Servers []string `toml:"servers"` + Methods []string `toml:"methods"` + Timeout Duration `toml:"timeout"` + ReadTimeout Duration `toml:"read_timeout"` + WriteTimeout Duration `toml:"write_timeout"` + MaxBodySize Size `toml:"max_body_size"` + Port int `toml:"port"` + Command string + PidFile string + Log telegraf.Logger `toml:"-"` + tls.ServerConfig + + parser parsers.Parser +} + +func (m *MockupInputPlugin) SampleConfig() string { return "Mockup test intput plugin" } +func (m 
*MockupInputPlugin) Description() string { return "Mockup test intput plugin" } +func (m *MockupInputPlugin) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPlugin) SetParser(parser parsers.Parser) { m.parser = parser } + +/*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/ +type MockupOuputPlugin struct { + URL string `toml:"url"` + Headers map[string]string `toml:"headers"` + Scopes []string `toml:"scopes"` + NamespacePrefix string `toml:"namespace_prefix"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig +} + +func (m *MockupOuputPlugin) Connect() error { return nil } +func (m *MockupOuputPlugin) Close() error { return nil } +func (m *MockupOuputPlugin) Description() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) SampleConfig() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) Write(metrics []telegraf.Metric) error { return nil } + +// Register the mockup plugin on loading +func init() { + // Register the mockup input plugin for the required names + inputs.Add("exec", func() telegraf.Input { return &MockupInputPlugin{Timeout: Duration(time.Second * 5)} }) + inputs.Add("http_listener_v2", func() telegraf.Input { return &MockupInputPlugin{} }) + inputs.Add("memcached", func() telegraf.Input { return &MockupInputPlugin{} }) + inputs.Add("procstat", func() telegraf.Input { return &MockupInputPlugin{} }) + + // Register the mockup output plugin for the required names + outputs.Add("azure_monitor", func() telegraf.Output { return &MockupOuputPlugin{NamespacePrefix: "Telegraf/"} }) + outputs.Add("http", func() telegraf.Output { return &MockupOuputPlugin{} }) } diff --git a/config/testdata/azure_monitor.toml b/config/testdata/azure_monitor.toml new file mode 100644 index 0000000000000..6151bea9020c5 --- /dev/null +++ b/config/testdata/azure_monitor.toml @@ -0,0 +1,4 @@ +[[outputs.azure_monitor]] + +[[outputs.azure_monitor]] + namespace_prefix = "" diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index f71b98206e5e8..6967d6e862277 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -176,14 +176,6 @@ # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port. 
servers = ["127.0.0.1:4021"] -# Read metrics from local Lustre service on OST, MDS -[[inputs.lustre2]] - # An array of /proc globs to search for Lustre stats - # If not specified, the default will work on Lustre 2.5.x - # - # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"] - # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] - # Read metrics about memory usage [[inputs.mem]] # no configuration diff --git a/config/types_test.go b/config/types_test.go index 8e35de6111c82..afff599e3d6e4 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -29,3 +29,49 @@ func TestConfigDuration(t *testing.T) { require.Equal(t, p.MaxParallelLookups, 13) require.Equal(t, p.Ordered, true) } + +func TestDuration(t *testing.T) { + var d config.Duration + + require.NoError(t, d.UnmarshalTOML([]byte(`"1s"`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`1s`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`'1s'`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`10`))) + require.Equal(t, 10*time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`1.5`))) + require.Equal(t, time.Second, time.Duration(d)) +} + +func TestSize(t *testing.T) { + var s config.Size + + require.NoError(t, s.UnmarshalTOML([]byte(`"1B"`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`1`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`'1'`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`"1GB"`))) + require.Equal(t, int64(1000*1000*1000), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`"12GiB"`))) + require.Equal(t, int64(12*1024*1024*1024), int64(s)) +} diff --git a/docker-compose.yml b/docker-compose.yml index 4e94b8f012eab..bd092d0718388 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -40,12 +40,15 @@ services: - MYSQL_ALLOW_EMPTY_PASSWORD=yes ports: - "3306:3306" + # removes warning "mbind operation not permitted" enables you to see the docker logs + cap_add: + - SYS_NICE # CAP_SYS_NICE memcached: image: memcached ports: - "11211:11211" pgbouncer: - image: mbentley/ubuntu-pgbouncer + image: z9pascal/pgbouncer-container:1.15-latest environment: - PG_ENV_POSTGRESQL_USER=pgbouncer - PG_ENV_POSTGRESQL_PASS=pgbouncer @@ -53,7 +56,7 @@ services: - "6432:6432" postgres: image: postgres:alpine - environment: + environment: - POSTGRES_HOST_AUTH_METHOD=trust ports: - "5432:5432" @@ -75,6 +78,10 @@ services: image: ncarlier/mqtt ports: - "1883:1883" + opcua: + image: open62541/open62541 + ports: + - "4840:4840" riemann: image: stealthly/docker-riemann ports: diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index a5930a3e0df6d..0edf467837457 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -11,13 +11,13 @@ This section is for developers who want to create a new aggregator plugin. `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. 
+ consult the [Sample Config][] page for the latest style guidelines.
 * The `Description` function should say in one line what this aggregator does.
 * The Aggregator plugin will need to keep caches of metrics that have passed through it. This should be done using the builtin `HashID()` function of each metric.
 * When the `Reset()` function is called, all caches should be cleared.
-- Follow the recommended [CodeStyle][].
+- Follow the recommended [Code Style][].
 
 ### Aggregator Plugin Example
@@ -128,5 +128,5 @@ func init() {
 ```
 [telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator
-[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
-[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
+[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md
+[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md
diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md
index 7be34aed5cef4..934a4b0cf7706 100644
--- a/docs/AGGREGATORS_AND_PROCESSORS.md
+++ b/docs/AGGREGATORS_AND_PROCESSORS.md
@@ -17,8 +17,8 @@ metrics as they pass through Telegraf:
 │ Memory │───┤ ┌──▶│ InfluxDB │ │ │ │ │ │ │ └───────────┘ │ ┌─────────────┐ ┌─────────────┐ │ └───────────┘ - │ │ │ │Aggregate │ │ -┌───────────┐ │ │Process │ │ - mean │ │ ┌───────────┐ + │ │ │ │Aggregators │ │ +┌───────────┐ │ │Processors │ │ - mean │ │ ┌───────────┐ │ │ │ │ - transform │ │ - quantiles │ │ │ │ │ MySQL │───┼───▶│ - decorate │────▶│ - min/max │───┼──▶│ File │ │ │ │ │ - filter │ │ - count │ │ │ │
@@ -62,6 +62,6 @@ emit the aggregates and not the original metrics.
 Since aggregates are created for each measurement, field, and unique tag combination the plugin receives, you can make use of `taginclude` to group
-aggregates by specific tags only.
+aggregates by specific tags only.
 **Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included.
diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md
new file mode 100644
index 0000000000000..cb0c31268c9a4
--- /dev/null
+++ b/docs/COMMANDS_AND_FLAGS.md
@@ -0,0 +1,67 @@
+# Telegraf Commands & Flags
+
+### Usage
+
+```
+telegraf [commands]
+telegraf [flags]
+```
+
+### Commands
+
+|command|description|
+|--------|-----------------------------------------------|
+|`config` |print out full sample configuration to stdout|
+|`version`|print the version to stdout|
+
+### Flags
+
+|flag|description|
+|-------------------|------------|
+|`--aggregator-filter <filter>` |filter the aggregators to enable, separator is `:`|
+|`--config <file>` |configuration file to load|
+|`--config-directory <directory>` |directory containing additional *.conf files|
+|`--watch-config` |Telegraf will restart on local config changes.<br>Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`.<br>Monitoring is off by default.|
+|`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced.|
+|`--debug` |turn on debug logging|
+|`--input-filter <filter>` |filter the inputs to enable, separator is `:`|
+|`--input-list` |print available input plugins.|
+|`--output-filter <filter>` |filter the outputs to enable, separator is `:`|
+|`--output-list` |print available output plugins.|
+|`--pidfile <file>` |file to write our pid to|
+|`--pprof-addr <address>` |pprof address to listen on, don't activate pprof if empty|
+|`--processor-filter <filter>` |filter the processors to enable, separator is `:`|
+|`--quiet` |run in quiet mode|
+|`--section-filter <filter>` |filter config sections to output, separator is `:`<br>Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`|
+|`--sample-config` |print out full sample configuration|
+|`--once` |enable once mode: gather metrics once, write them, and exit|
+|`--test` |enable test mode: gather metrics once and print them|
+|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode|
+|`--usage <plugin>` |print usage for a plugin, e.g. `telegraf --usage mysql`|
+|`--version` |display the version and exit|
+
+### Examples
+
+**Generate a telegraf config file:**
+
+`telegraf config > telegraf.conf`
+
+**Generate config with only cpu input & influxdb output plugins defined:**
+
+`telegraf --input-filter cpu --output-filter influxdb config`
+
+**Run a single telegraf collection, outputting metrics to stdout:**
+
+`telegraf --config telegraf.conf --test`
+
+**Run telegraf with all plugins defined in config file:**
+
+`telegraf --config telegraf.conf`
+
+**Run telegraf, enabling the cpu & memory input, and influxdb output plugins:**
+
+`telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb`
+
+**Run telegraf with pprof:**
+
+`telegraf --config telegraf.conf --pprof-addr localhost:6060`
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 9b8b07263b700..9af88b669ea9f 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -19,6 +19,8 @@ To generate a file with specific inputs and outputs, you can use the
 telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
 ```
 
+[View the full list][flags] of Telegraf commands and flags, or run `telegraf --help`.
+
 ### Configuration Loading
 
 The location of the configuration file can be set via the `--config` command
@@ -87,16 +89,16 @@ INFLUX_BUCKET="replace_with_your_bucket_name"
 # For InfluxDB OSS 2:
 [[outputs.influxdb_v2]]
 urls = ["${INFLUX_HOST}"]
- token = ["${INFLUX_TOKEN}"]
- org = ["${INFLUX_ORG}"]
- bucket = ["${INFLUX_BUCKET}"]
+ token = "${INFLUX_TOKEN}"
+ organization = "${INFLUX_ORG}"
+ bucket = "${INFLUX_BUCKET}"
 
 # For InfluxDB Cloud 2:
 [[outputs.influxdb_v2]]
 urls = ["${INFLUX_HOST}"]
- token = ["${INFLUX_TOKEN}"]
- org = ["${INFLUX_ORG}"]
- bucket = ["${INFLUX_BUCKET}"]
+ token = "${INFLUX_TOKEN}"
+ organization = "${INFLUX_ORG}"
+ bucket = "${INFLUX_BUCKET}"
 ```
 
 The above files will produce the following effective configuration file to be
@@ -117,7 +119,7 @@ parsed:
 [[outputs.influxdb_v2]]
 urls = ["http://127.0.0.1:8086"] # double check the port. could be 9999 if using OSS Beta
 token = "replace_with_your_token"
- org = "your_username"
+ organization = "your_username"
 bucket = "replace_with_your_bucket_name"
 
 # For InfluxDB Cloud 2:
@@ -126,7 +128,7 @@ parsed:
 INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com" # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls
 token = "replace_with_your_token"
- org = "yourname@yourcompany.com"
+ organization = "yourname@yourcompany.com"
 bucket = "replace_with_your_bucket_name"
 ```
 
@@ -144,6 +146,7 @@ combining an integer value and time unit as a string value. Valid time units ar
 Global tags can be specified in the `[global_tags]` table in key="value" format.
 All metrics that are gathered will be tagged with the tags specified.
+Global tags are overridden by tags set by plugins.
 
 ```toml
 [global_tags]
@@ -219,6 +222,10 @@ The agent table configures Telegraf and the defaults used across all plugins.
 Maximum number of rotated archives to keep, any older logs are deleted.
If set to -1, no archives are removed. +- **log_with_timezone**: + Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. + [See this page for options/formats.](https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt) + - **hostname**: Override default hostname, if empty use os.Hostname() - **omit_hostname**: @@ -428,7 +435,7 @@ Parameters that can be used with any aggregator plugin: the name of the input). - **name_prefix**: Specifies a prefix to attach to the measurement name. - **name_suffix**: Specifies a suffix to attach to the measurement name. -- **tags**: A map of tags to apply to a specific input's measurements. +- **tags**: A map of tags to apply to the measurement - behavior varies based on aggregator. The [metric filtering][] parameters can be used to limit what metrics are handled by the aggregator. Excluded metrics are passed downstream to the next @@ -666,3 +673,4 @@ Reference the detailed [TLS][] documentation. [telegraf.conf]: /etc/telegraf.conf [TLS]: /docs/TLS.md [glob pattern]: https://github.com/gobwas/glob#syntax +[flags]: /docs/COMMANDS_AND_FLAGS.md diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index b716501683bf8..cb04d3e009030 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -5,17 +5,21 @@ using a configurable parser into [metrics][]. This allows, for example, the `kafka_consumer` input plugin to process messages in either InfluxDB Line Protocol or in JSON format. -- [InfluxDB Line Protocol](/plugins/parsers/influx) - [Collectd](/plugins/parsers/collectd) - [CSV](/plugins/parsers/csv) - [Dropwizard](/plugins/parsers/dropwizard) - [Graphite](/plugins/parsers/graphite) - [Grok](/plugins/parsers/grok) +- [InfluxDB Line Protocol](/plugins/parsers/influx) - [JSON](/plugins/parsers/json) +- [JSON v2](/plugins/parsers/json_v2) - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) +- [Prometheus](/plugins/parsers/prometheus) +- [PrometheusRemoteWrite](/plugins/parsers/prometheusremotewrite) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) +- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) Any input plugin containing the `data_format` option can use it to select the desired parser: @@ -29,9 +33,6 @@ desired parser: name_suffix = "_mycollector" ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" ``` diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 2b3e953601218..720c922de6755 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -8,10 +8,12 @@ plugins. 1. [Carbon2](/plugins/serializers/carbon2) 1. [Graphite](/plugins/serializers/graphite) 1. [JSON](/plugins/serializers/json) +1. [MessagePack](/plugins/serializers/msgpack) 1. [Prometheus](/plugins/serializers/prometheus) +1. [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) +1. [ServiceNow Metrics](/plugins/serializers/nowmetric) 1. [SplunkMetric](/plugins/serializers/splunkmetric) 1. [Wavefront](/plugins/serializers/wavefront) -1. 
[ServiceNow Metrics](/plugins/serializers/nowmetric)
 
 You will be able to identify the plugins with support by the presence of a `data_format` config option, for example, in the `file` output plugin:
@@ -22,8 +24,5 @@ You will be able to identify the plugins with support by the presence of a
 files = ["stdout"]
 
 ## Data format to output.
- ## Each data format has its own unique set of configuration options, read
- ## more about them here:
- ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
 data_format = "influx"
 ```
diff --git a/docs/DOCKER.md b/docs/DOCKER.md
new file mode 100644
index 0000000000000..5d0484e10be5a
--- /dev/null
+++ b/docs/DOCKER.md
@@ -0,0 +1,3 @@
+# Telegraf Docker Images
+
+Docker images for Telegraf are kept in the [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker/tree/master/telegraf) repo.
diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md
index aa3b5058aa8b4..83759ed72bb63 100644
--- a/docs/EXTERNAL_PLUGINS.md
+++ b/docs/EXTERNAL_PLUGINS.md
@@ -8,6 +8,8 @@ more flexibility compared to internal Telegraf plugins.
 - External plugins can access libraries not written in Go
 - Utilize licensed software that isn't available to the open source community
 - Can include large dependencies that would otherwise bloat Telegraf
+- You don't need to wait on the Telegraf team to publish your plugin before you can start using it.
+- Using the [shim](/plugins/common/shim) you can easily convert plugins between internal and external use.
 
 ### External Plugin Guidelines
 The guidelines for writing external plugins follow those for our general [input](/docs/INPUTS.md),
@@ -56,13 +58,12 @@ This is a guide to help you set up your plugin to use it with `execd`
 block to look for this plugin.
 1. Add usage and development instructions in the homepage of your repository for running your plugin with its respective `execd` plugin. Please refer to
- [openvpn](/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](/vipinvkmenon/awsalarms#installation)
+ [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation)
 for examples. Include the following steps:
 1. How to download the release package for your platform or how to clone the binary for your external plugin
- 1. The commands to unpack or build your binary
+ 1. The commands to build your binary
 1. Location to edit your `telegraf.conf`
 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd)
- 1. Note that restart or reload of Telegraf is required
 1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) list. Please include the plugin name, link to the plugin repository and a short description of the plugin.
diff --git a/docs/FAQ.md b/docs/FAQ.md
index 4fe28db8b9cbc..40a101fdf6fe1 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -50,8 +50,6 @@ You can use the following techniques to avoid cardinality issues:
 - Use [metric filtering][] options to exclude unneeded measurements and tags.
 - Write to a database with an appropriate [retention policy][].
-- Limit series cardinality in your database using the
- [max-series-per-database][] and [max-values-per-tag][] settings.
 - Consider using the [Time Series Index][tsi].
 - Monitor your databases using the [show cardinality][] commands.
- Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques. @@ -59,13 +57,6 @@ You can use the following techniques to avoid cardinality issues: [series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx docs]: https://docs.influxdata.com/influxdb/latest/ - -### Q: When will the next version be released? - -The latest release date estimate can be viewed on the -[milestones](https://github.com/influxdata/telegraf/milestones) page. diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 179b674442d6d..679c24e287604 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -17,10 +17,10 @@ and submit new inputs. `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style + consult the [Sample Config][] page for the latest style guidelines. - The `Description` function should say in one line what this plugin does. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. Let's say you've written a plugin that emits metrics about processes on the current host. @@ -83,7 +83,7 @@ func init() { ### Typed Metrics -In addition the the `AddFields` function, the accumulator also supports +In addition to the `AddFields` function, the accumulator also supports functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types are ignored by the InfluxDB output, but can be used for other outputs, such as [prometheus][prom metric types]. @@ -143,8 +143,8 @@ Check the [amqp_consumer][] for an example implementation. 
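
To make the "Typed Metrics" note above concrete, here is a minimal sketch of a `Gather` implementation that records a gauge. The `ExampleInput` type, measurement name, field, and tag are illustrative assumptions; only `telegraf.Accumulator.AddGauge` is the real API:

```go
package example

import "github.com/influxdata/telegraf"

// ExampleInput is a hypothetical plugin type used only for this sketch.
type ExampleInput struct{}

func (e *ExampleInput) SampleConfig() string { return "" }
func (e *ExampleInput) Description() string  { return "sketch of a typed-metrics input" }

// Gather emits one gauge metric per collection interval.
func (e *ExampleInput) Gather(acc telegraf.Accumulator) error {
	// AddGauge records the metric with the gauge value type; the InfluxDB
	// output ignores metric types, but outputs such as prometheus use them.
	acc.AddGauge("example_gauge",
		map[string]interface{}{"value": 42.0}, // fields
		map[string]string{"source": "demo"},   // tags
	)
	return nil
}
```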
[amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer
 [prom metric types]: https://prometheus.io/docs/concepts/metric_types/
 [input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
-[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
+[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md
+[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md
 [telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input
 [telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput
 [telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator
diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md
new file mode 100644
index 0000000000000..b7af829588c8b
--- /dev/null
+++ b/docs/INTEGRATION_TESTS.md
@@ -0,0 +1,61 @@
+# Integration Tests
+
+Running the integration tests requires several Docker containers to be running. You can start the containers with:
+```
+docker-compose up
+```
+
+To run only the integration tests use:
+
+```
+make test-integration
+```
+
+Use `make docker-kill` to stop the containers.
+
+Contributing integration tests:
+
+- Add `Integration` to the end of the test name so it will be run with the above command.
+- Write integration tests where no library is being used in the plugin, where code coverage is otherwise poor, or where the plugin has dynamic code that only gets run at runtime, e.g. SQL.
+
+Current areas we have integration tests:
+
+| Area |
+|------------------------------------|
+| Inputs: Aerospike |
+| Inputs: Disque |
+| Inputs: Dovecot |
+| Inputs: Mcrouter |
+| Inputs: Memcached |
+| Inputs: Mysql |
+| Inputs: Opcua |
+| Inputs: Openldap |
+| Inputs: Pgbouncer |
+| Inputs: Postgresql |
+| Inputs: Postgresql extensible |
+| Inputs: Procstat / Native windows |
+| Inputs: Prometheus |
+| Inputs: Redis |
+| Inputs: Sqlserver |
+| Inputs: Win perf counters |
+| Inputs: Win services |
+| Inputs: Zookeeper |
+| Outputs: Cratedb / Postgres |
+| Outputs: Elasticsearch |
+| Outputs: Kafka |
+| Outputs: MQTT |
+| Outputs: Nats |
+| Outputs: NSQ |
+
+Areas we would benefit most from new integration tests:
+
+| Area |
+|------------------------------------|
+| SNMP |
+| MYSQL |
+| SQLSERVER |
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index d8a942e63e1ad..c2f542cd77cbd 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -8,25 +8,58 @@ following works:
 - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD)
 - github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE)
 - github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE)
+- github.com/Azure/azure-kusto-go [MIT License](https://github.com/Azure/azure-kusto-go/blob/master/LICENSE)
 - github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE)
 - github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE)
+- github.com/Azure/azure-storage-blob-go [MIT
License](https://github.com/Azure/azure-storage-blob-go/blob/master/LICENSE) - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) - github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) -- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) - github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) +- github.com/alecthomas/participle [MIT License](https://github.com/alecthomas/participle/blob/master/COPYING) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) +- github.com/aliyun/alibaba-cloud-sdk-go [Apache License 2.0](https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/LICENSE) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) +- github.com/antchfx/jsonquery [MIT License](https://github.com/antchfx/jsonquery/blob/master/LICENSE) +- github.com/antchfx/xmlquery [MIT License](https://github.com/antchfx/xmlquery/blob/master/LICENSE) +- github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) +- github.com/apache/arrow/go/arrow [Apache License 2.0](https://github.com/apache/arrow/blob/master/LICENSE.txt) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) +- github.com/armon/go-metrics [MIT License](https://github.com/armon/go-metrics/blob/master/LICENSE) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/dynamodb/attributevalue/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/s3/manager/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/configsources [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/configsources/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/ini 
[Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/ini/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/cloudwatch [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatch/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatchlogs/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/dynamodb [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/dynamodb/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/dynamodbstreams [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/dynamodbstreams/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/endpoint-discovery/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/s3shared [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/s3shared/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/kinesis [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/kinesis/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/s3 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/s3/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/sso [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/sts [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/sts/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/timestreamwrite [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/timestreamwrite/LICENSE.txt) +- github.com/aws/smithy-go [Apache License 2.0](https://github.com/aws/smithy-go/blob/main/LICENSE) +- github.com/awslabs/kinesis-aggregation/go [Apache License 2.0](https://github.com/awslabs/kinesis-aggregation/blob/master/LICENSE.txt) - github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/bmatcuk/doublestar [MIT License](https://github.com/bmatcuk/doublestar/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) @@ -34,74 +67,113 @@ following works: - github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) -- github.com/couchbase/goutils [COUCHBASE INC. 
COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) +- github.com/couchbase/goutils [Apache License 2.0](https://github.com/couchbase/goutils/blob/master/LICENSE.md) - github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) -- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) - github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) -- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE) +- github.com/doclambda/protobufquery [MIT License](https://github.com/doclambda/protobufquery/blob/master/LICENSE) +- github.com/dynatrace-oss/dynatrace-metric-utils-go [Apache License 2.0](https://github.com/dynatrace-oss/dynatrace-metric-utils-go/blob/master/LICENSE) - github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) -- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) +- github.com/fatih/color [MIT License](https://github.com/fatih/color/blob/master/LICENSE.md) +- github.com/form3tech-oss/jwt-go [MIT License](https://github.com/form3tech-oss/jwt-go/blob/master/LICENSE) - github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) -- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) +- github.com/go-logr/logr [Apache License 2.0](https://github.com/go-logr/logr/blob/master/LICENSE) - github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) +- github.com/go-ping/ping [MIT License](https://github.com/go-ping/ping/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) -- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) -- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE) +- github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/gobwas/glob [MIT 
License](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE)
+- github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE)
- github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE)
- github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE)
- github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/google/flatbuffers [Apache License 2.0](https://github.com/google/flatbuffers/blob/master/LICENSE.txt)
- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE)
- github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE)
+- github.com/google/gofuzz [Apache License 2.0](https://github.com/google/gofuzz/blob/master/LICENSE)
+- github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE)
- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE)
+- github.com/googleapis/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE)
- github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE)
- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE)
+- github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE)
+- github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE)
+- github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE)
+- github.com/grid-x/serial [MIT License](https://github.com/grid-x/serial/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE)
-- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE)
+- github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE)
+- github.com/hashicorp/go-hclog [Mozilla Public License 2.0](https://github.com/hashicorp/go-hclog/blob/master/LICENSE)
+- github.com/hashicorp/go-immutable-radix [Mozilla Public License 2.0](https://github.com/hashicorp/go-immutable-radix/blob/master/LICENSE)
- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE)
--
github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE) +- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE) +- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) +- github.com/influxdata/influxdb-observability/common [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/influxdb-observability/influx2otel [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/influxdb-observability/otel2influx [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/jackc/chunkreader [MIT License](https://github.com/jackc/chunkreader/blob/master/LICENSE) +- github.com/jackc/pgconn [MIT License](https://github.com/jackc/pgconn/blob/master/LICENSE) +- github.com/jackc/pgio [MIT License](https://github.com/jackc/pgio/blob/master/LICENSE) +- github.com/jackc/pgpassfile [MIT License](https://github.com/jackc/pgpassfile/blob/master/LICENSE) +- github.com/jackc/pgproto3 [MIT License](https://github.com/jackc/pgproto3/blob/master/LICENSE) +- github.com/jackc/pgservicefile [MIT License](https://github.com/jackc/pgservicefile/blob/master/LICENSE) +- github.com/jackc/pgtype [MIT License](https://github.com/jackc/pgtype/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) +- github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) +- github.com/jcmturner/aescts [Apache License 2.0](https://github.com/jcmturner/aescts/blob/master/LICENSE) +- github.com/jcmturner/dnsutils [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) +- github.com/jcmturner/gokrb5 [Apache License 2.0](https://github.com/jcmturner/gokrb5/blob/master/LICENSE) +- github.com/jcmturner/rpc [Apache License 2.0](https://github.com/jcmturner/rpc/blob/master/LICENSE) +- github.com/jhump/protoreflect [Apache License 2.0](https://github.com/jhump/protoreflect/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/josharian/intern [MIT License](https://github.com/josharian/intern/blob/master/license.md) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) +- github.com/json-iterator/go [MIT License](https://github.com/json-iterator/go/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/karrick/godirwalk [BSD 2-Clause "Simplified" 
License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) -- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) -- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) +- github.com/kylelemons/godebug [Apache License](https://github.com/kylelemons/godebug/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) -- github.com/mattn/go-sqlite3 [MIT License](https://github.com/mattn/go-sqlite3/blob/master/LICENSE) +- github.com/mattn/go-colorable [MIT License](https://github.com/mattn/go-colorable/blob/master/LICENSE) +- github.com/mattn/go-ieproxy [MIT License](https://github.com/mattn/go-ieproxy/blob/master/LICENSE) +- github.com/mattn/go-isatty [MIT License](https://github.com/mattn/go-isatty/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) - github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) +- github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) +- github.com/minio/highwayhash [Apache License 2.0](https://github.com/minio/highwayhash/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/moby/ipvs [Apache License 2.0](https://github.com/moby/ipvs/blob/master/LICENSE) +- github.com/modern-go/concurrent [Apache License 2.0](https://github.com/modern-go/concurrent/blob/master/LICENSE) +- github.com/modern-go/reflect2 [Apache License 2.0](https://github.com/modern-go/reflect2/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) - github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) - github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) @@ -114,26 +186,45 @@ following works: - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) -- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) +- 
github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) +- github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) +- github.com/pion/dtls [MIT License](https://github.com/pion/dtls/blob/master/LICENSE) +- github.com/pion/logging [MIT License](https://github.com/pion/logging/blob/master/LICENSE) +- github.com/pion/transport [MIT License](https://github.com/pion/transport/blob/master/LICENSE) +- github.com/pion/udp [MIT License](https://github.com/pion/udp/blob/master/LICENSE) +- github.com/pkg/browser [BSD 2-Clause "Simplified" License](https://github.com/pkg/browser/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) - github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) - github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE) - github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE) - github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) +- github.com/prometheus/prometheus [Apache License 2.0](https://github.com/prometheus/prometheus/blob/master/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/remyoudompheng/bigfft [BSD 3-Clause "New" or "Revised" License](https://github.com/remyoudompheng/bigfft/blob/master/LICENSE) +- github.com/riemann/riemann-go-client [MIT License](https://github.com/riemann/riemann-go-client/blob/master/LICENSE) - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/showwin/speedtest-go [MIT License](https://github.com/showwin/speedtest-go/blob/master/LICENSE) +- github.com/signalfx/com_signalfx_metrics_protobuf [Apache License 2.0](https://github.com/signalfx/com_signalfx_metrics_protobuf/blob/master/LICENSE) +- github.com/signalfx/gohistogram [MIT License](https://github.com/signalfx/gohistogram/blob/master/LICENSE) +- github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) +- github.com/signalfx/sapm-proto [Apache License 2.0](https://github.com/signalfx/sapm-proto/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) -- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) +- github.com/sleepinggenius2/gosmi [MIT License](https://github.com/sleepinggenius2/gosmi/blob/master/LICENSE) +- github.com/snowflakedb/gosnowflake [Apache License 2.0](https://github.com/snowflakedb/gosnowflake/blob/master/LICENSE) - github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) - github.com/stretchr/objx [MIT 
License](https://github.com/stretchr/objx/blob/master/LICENSE) - github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) - github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) +- github.com/tinylib/msgp [MIT License](https://github.com/tinylib/msgp/blob/master/LICENSE) +- github.com/tklauser/go-sysconf [BSD 3-Clause "New" or "Revised" License](https://github.com/tklauser/go-sysconf/blob/master/LICENSE) +- github.com/tklauser/numcpus [Apache License 2.0](https://github.com/tklauser/numcpus/blob/master/LICENSE) +- github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) @@ -141,36 +232,59 @@ following works: - github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- github.com/xdg-go/pbkdf2 [Apache License 2.0](https://github.com/xdg-go/pbkdf2/blob/main/LICENSE) +- github.com/xdg-go/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) +- github.com/xdg-go/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) +- github.com/xdg/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) +- github.com/xdg/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) +- github.com/youmark/pkcs8 [MIT License](https://github.com/youmark/pkcs8/blob/master/LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) +- go.opentelemetry.io/collector/model [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) +- go.uber.org/atomic [MIT License](https://pkg.go.dev/go.uber.org/atomic?tab=licenses) +- go.uber.org/multierr [MIT License](https://pkg.go.dev/go.uber.org/multierr?tab=licenses) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) - golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) - golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) +- golang.org/x/term 
[BSD 3-Clause License](https://pkg.go.dev/golang.org/x/term?tab=licenses) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) -- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) +- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/xerrors/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) +- google.golang.org/protobuf [BSD 3-Clause "New" or "Revised" License](https://pkg.go.dev/google.golang.org/protobuf?tab=licenses) - gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) +- gopkg.in/djherbis/times.v1 [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) -- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) -- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) +- gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) - gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) -- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) +- gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) +- gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) +- k8s.io/api [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/client-go [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/utils [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- modernc.org/libc [BSD 3-Clause 
"New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) +- modernc.org/mathutil [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/mathutil/-/blob/master/LICENSE) +- modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) +- modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) +- sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) + +## Telegraf used and modified code from these projects -## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md new file mode 100644 index 0000000000000..a11b2bdfefecc --- /dev/null +++ b/docs/NIGHTLIES.md @@ -0,0 +1,21 @@ + +# Nightly Builds + +These builds are generated from the master branch each night: + +| DEB | RPM | TAR GZ | ZIP | +| --------------- | --------------- | ------------------------------| --- | +| [amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) | [aarch64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.aarch64.rpm) | [darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) | [windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) | +| [arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) | [armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) | [freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) | [windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) | +| [armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) | [armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) | [freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) | | +| [armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) | [i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) | [freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) | | +| [i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) | [ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) | [linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) | | +| [mips.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mips.deb) | [s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) | [linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) | | +| [mipsel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mipsel.deb) | [x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) | [linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) | | +| [ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) | | 
[linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) | | +| [s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) | | [linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) | | +| | | [linux_mips.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mips.tar.gz) | | +| | | [linux_mipsel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mipsel.tar.gz) | | +| | | [linux_ppc64le.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_ppc64le.tar.gz) | | +| | | [linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) | | +| | | [static_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_static_linux_amd64.tar.gz) | | diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index 1a27ca515f118..db8383126ad68 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -13,9 +13,9 @@ similar constructs. `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. + consult the [Sample Config][] page for the latest style guidelines. - The `Description` function should say in one line what this output does. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. ### Output Plugin Example @@ -115,6 +115,6 @@ or investigate other reasons why the writes might be taking longer than expected [file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file [output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md [telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index 25566fe323fd2..30b2c643de8f6 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -14,9 +14,9 @@ This section is for developers who want to create a new processor plugin. config`. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. + consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this processor does. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. 
### Processor Plugin Example

@@ -160,7 +160,7 @@ func init() {
 }
 ```

-[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
-[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
+[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md
+[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md
[telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor
[telegraf.StreamingProcessor]: https://godoc.org/github.com/influxdata/telegraf#StreamingProcessor
diff --git a/docs/README.md b/docs/README.md
index b7b55336c5a04..99320dee95588 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -10,6 +10,8 @@
- [Profiling][profiling]
- [Windows Service][winsvc]
- [FAQ][faq]
+- Developer Builds
+  - [Nightlies][nightlies]
[conf]: /docs/CONFIGURATION.md
[metrics]: /docs/METRICS.md
@@ -19,3 +21,4 @@
[profiling]: /docs/PROFILING.md
[winsvc]: /docs/WINDOWS_SERVICE.md
[faq]: /docs/FAQ.md
+[nightlies]: /docs/NIGHTLIES.md
\ No newline at end of file
diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md
new file mode 100644
index 0000000000000..81049fcee9f99
--- /dev/null
+++ b/docs/SQL_DRIVERS_INPUT.md
@@ -0,0 +1,43 @@
+# Available SQL drivers for the SQL input plugin
+
+This is a list of available drivers for the SQL input plugin. The data-source-name (DSN) is driver specific and
+might change between versions. Please check the driver documentation for available options and the format.
+
+database | driver | aliases | example DSN | comment
+---------------------| ------------------------------------------------------| --------------- | -------------------------------------------------------------------------------------- | -------
+CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres, pgx | see _postgres_ driver | uses PostgreSQL driver
+MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver
+Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value&param2=value` | uses newer _sqlserver_ driver
+MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information
+PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information
+SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information
+TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver
+
+## Comments
+
+### Driver aliases
+Some database drivers are supported through another driver (e.g. CockroachDB). For other databases we provide a more
+obvious name (e.g. postgres) compared to the driver name. For all of those drivers you might use an _alias_ name
+during configuration.
+
+### Example data-source-name DSN
+The given examples are just that, so please check the driver documentation for the exact format
+and available options and parameters. Please note that the format of a DSN might also change
+between driver versions.
+
+### Type conversions
+Telegraf relies on type conversion of the database driver and/or the golang sql framework. In case you find
+any problem, please open an issue!
+
+## Help
+If nothing seems to work, you might find help in the Telegraf forum or in the chat.
+
+### The documentation is wrong
+Please open an issue or even better send a pull-request!
+
+### I found a bug
+Please open an issue or even better send a pull-request!
+
+### My database is not supported
+We currently cannot support CGO drivers in Telegraf! Please check if a **pure Go** driver for the [golang sql framework](https://golang.org/pkg/database/sql/) exists.
+If you find such a driver, please let us know by opening an issue or even better by sending a pull-request!
diff --git a/docs/SUPPORTED_PLATFORMS.md b/docs/SUPPORTED_PLATFORMS.md
new file mode 100644
index 0000000000000..9df5dfa2cbaf9
--- /dev/null
+++ b/docs/SUPPORTED_PLATFORMS.md
@@ -0,0 +1,199 @@
+# Supported Platforms
+
+Telegraf is a cross-platform application. This doc helps define which
+operating systems, distributions, and releases Telegraf supports.
+
+Telegraf is supported on Linux, FreeBSD, Windows, and macOS. It is
+written in Go, which supports these operating systems and
+more. Telegraf may work on other operating systems Go supports, and
+users are welcome to build their own binaries for them. Bug reports
+should be submitted only for supported platforms.
+
+Golang.org has a [table][go-table] of valid OS and architecture
+combinations and the Go wiki has more specific [minimum
+requirements][go-reqs] for Go itself.
+
+[go-table]: https://golang.org/doc/install/source#environment
+[go-reqs]: https://github.com/golang/go/wiki/MinimumRequirements#operating-systems
+
+## Linux
+
+Telegraf intent: *Support latest versions of major Linux
+distributions*
+
+Telegraf supports RHEL, Fedora, Debian, and Ubuntu. InfluxData
+provides package repositories for these distributions. Instructions
+for using the package repositories can be found on
+[docs.influxdata.com][repo-docs]. Bug reports should be submitted only
+for supported distributions and releases.
+
+Telegraf's Debian or Ubuntu packages are likely to work on other
+Debian-based distributions, although these are not
+supported. Similarly, Telegraf's Fedora and RHEL packages are likely
+to work on other Red Hat-based distributions, although again these are
+not supported.
+
+Telegraf releases include .tar.gz packages for use with other
+distributions, for building container images, or for installation
+without a package manager. As part of Telegraf's release process we
+publish [official images][docker-hub] to Docker Hub.
+
+DistroWatch lists [major distributions][dw-major] and tracks
+[popularity][dw-pop] of distributions. Wikipedia lists [Linux
+distributions][wp-distro] by the major distribution they're based on.
+
+[repo-docs]: https://docs.influxdata.com/telegraf/latest/introduction/installation/
+[docker-hub]: https://hub.docker.com/_/telegraf
+[dw-major]: https://distrowatch.com/dwres.php?resource=major
+[dw-pop]: https://distrowatch.com/dwres.php?resource=popularity
+[wp-distro]: https://en.wikipedia.org/wiki/List_of_Linux_distributions
+
+### RHEL
+
+Red Hat makes a major release every four to five years and supports
+each release in production for ten years. Extended support is
+available for three or more years.
+
+Telegraf intent: *Support releases in RHEL production, but not in
+extended support.*
+
+Red Hat publishes [release history][rh-history] and Wikipedia has a
+[summary timeline][wp-rhel].
+
+As of April 2021, 7 and 8 are production releases.
+
+[rh-history]: https://access.redhat.com/articles/3078
+[wp-rhel]: https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Version_history_and_timeline
+
+### Ubuntu
+
+Ubuntu makes two releases a year. Every two years one of the releases
+is an LTS (long-term support) release. Interim (non-LTS) releases are
+in standard support for nine months. LTS releases are in maintenance
+for five years, then in extended security maintenance for up to three
+more years.
+
+Telegraf intent: *Support interim releases and LTS releases in Ubuntu
+maintenance, but not in extended security maintenance.*
+
+Ubuntu publishes [release history][ub-history] and Wikipedia has a
+[table][wp-ub] of all releases and support status.
+
+As of April 2021, Ubuntu 20.10 is in standard support. Ubuntu 18.04
+LTS and 20.04 LTS are in maintenance.
+
+[ub-history]: https://ubuntu.com/about/release-cycle
+[wp-ub]: https://en.wikipedia.org/wiki/Ubuntu_version_history#Table_of_versions
+
+### Debian
+
+Debian generally makes major releases every two years and provides
+security support for each release for three years. After security
+support expires the release enters long term support (LTS) until at
+least five years after release.
+
+Telegraf intent: *Support releases under Debian security support*
+
+Debian publishes [releases and support status][deb-history] and
+Wikipedia has a [summary table][wp-deb].
+
+As of April 2021, Debian 10 is in security support.
+
+[deb-history]: https://www.debian.org/releases/
+[wp-deb]: https://en.wikipedia.org/wiki/Debian_version_history#Release_table
+
+### Fedora
+
+Fedora makes two releases a year and supports each release for a year.
+
+Telegraf intent: *Support releases supported by Fedora*
+
+Fedora publishes [release history][fed-history] and Wikipedia has a
+[summary table][wp-fed].
+
+[fed-history]: https://fedoraproject.org/wiki/Releases
+[wp-fed]: https://en.wikipedia.org/wiki/Fedora_version_history#Version_history
+
+## FreeBSD
+
+FreeBSD makes major releases about every two years. Releases reach end
+of life after five years.
+
+Telegraf intent: *Support releases under FreeBSD security support*
+
+FreeBSD publishes [release history][freebsd-history] and Wikipedia has
+a [summary table][wp-freebsd].
+
+As of April 2021, releases 11 and 12 are under security support.
+
+[freebsd-history]: https://www.freebsd.org/security/#sup
+[wp-freebsd]: https://en.wikipedia.org/wiki/FreeBSD#Version_history
+
+## Windows
+
+Telegraf intent: *Support current versions of Windows and Windows
+Server*
+
+Microsoft has two release channels: the semi-annual channel (SAC) and
+the long-term servicing channel (LTSC). The semi-annual channel is for
+mainstream feature releases.
+
+Microsoft publishes [lifecycle policy by release][ms-lifecycle] and a
+[product lifecycle faq][ms-lifecycle-faq].
+
+[ms-lifecycle]: https://docs.microsoft.com/en-us/lifecycle/products/?terms=windows
+[ms-lifecycle-faq]: https://docs.microsoft.com/en-us/lifecycle/faq/windows
+
+### Windows 10
+
+Windows 10 makes SAC releases twice a year and supports those releases
+for [18 or 30 months][w10-timeline]. They also make LTSC releases
+which are supported for 10 years but are intended only for medical or
+industrial devices that require a static feature set.
+
+Telegraf intent: *Support semi-annual channel releases supported by
+Microsoft*
+
+Microsoft publishes Windows 10 [release information][w10-history], and
+[servicing channels][w10-channels]. Wikipedia has a [summary
+table][wp-w10] of support status.
+
+As of April 2021, versions 19H2, 20H1, and 20H2 are supported.
+
+[w10-timeline]: https://docs.microsoft.com/en-us/lifecycle/faq/windows#what-is-the-servicing-timeline-for-a-version-feature-update-of-windows-10
+[w10-history]: https://docs.microsoft.com/en-us/windows/release-health/release-information
+[w10-channels]: https://docs.microsoft.com/en-us/windows/deployment/update/get-started-updates-channels-tools
+[wp-w10]: https://en.wikipedia.org/wiki/Windows_10_version_history#Channels
+
+### Windows Server
+
+Windows Server makes SAC releases that are supported for 18 months
+and LTSC releases that are supported for five years under mainstream
+support and five more years under extended support.
+
+Telegraf intent: *Support current semi-annual channel releases
+supported by Microsoft and long-term releases under mainstream
+support*
+
+Microsoft publishes Windows Server [release information][ws-history]
+and [servicing channels][ws-channels].
+
+As of April 2021, Server 2016 (version 1607) and Server 2019 (version
+1809) are LTSC releases under mainstream support and versions 1909,
+2004, and 20H2 are supported SAC releases.
+
+[ws-history]: https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info
+[ws-channels]: https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19
+
+## macOS
+
+macOS makes one major release a year and provides support for each
+release for three years.
+
+Telegraf intent: *Support releases supported by Apple*
+
+Release history is available from [Wikipedia][wp-macos].
+
+As of April 2021, 10.14, 10.15, and 11 are supported.
+
+[wp-macos]: https://en.wikipedia.org/wiki/MacOS#Release_history
diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md
index 4244369d7dcab..42a5abea56f30 100644
--- a/docs/TEMPLATE_PATTERN.md
+++ b/docs/TEMPLATE_PATTERN.md
@@ -22,6 +22,7 @@ correspond to the field name.
Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.

+**NOTE:** `measurement` must be specified in your template.
**NOTE:** `field*` cannot be used in conjunction with `measurement*`.

### Examples
diff --git a/docs/TLS.md b/docs/TLS.md
index 3cd6a1025fc4b..355da32bb98be 100644
--- a/docs/TLS.md
+++ b/docs/TLS.md
@@ -18,6 +18,8 @@ For client TLS support we have the following options:
# tls_key = "/etc/telegraf/key.pem"
## Skip TLS verification.
# insecure_skip_verify = false
+## Send the specified TLS server name via SNI.
+# tls_server_name = "foo.example.com"
```

### Server Configuration
diff --git a/docs/developers/CODE_STYLE.md b/docs/developers/CODE_STYLE.md
new file mode 100644
index 0000000000000..1bbb2b14d84c4
--- /dev/null
+++ b/docs/developers/CODE_STYLE.md
@@ -0,0 +1,7 @@
+# Code Style
+Code is required to be formatted using `gofmt`; this covers most code style
+requirements. It is also highly recommended to use `goimports` to
+automatically order imports.
+
+Please try to keep line length under 80 characters; the exact number of
+characters is not strict, but shorter lines generally help with readability.
diff --git a/docs/developers/DEPRECATION.md b/docs/developers/DEPRECATION.md
new file mode 100644
index 0000000000000..a3da79a5ac8e8
--- /dev/null
+++ b/docs/developers/DEPRECATION.md
@@ -0,0 +1,88 @@
+# Deprecation
+Deprecation is the primary tool for making changes in Telegraf. A deprecation
+indicates that the community should move away from using a feature, and
+documents that the feature will be removed in the next major update (2.0).
+
+Key to deprecation is that the feature remains in Telegraf and the behavior is
+not changed.
+
+We do not have a strict definition of a breaking change. All code changes
+change behavior; whether to deprecate or to make the change immediately is
+decided based on the impact.
+
+## Deprecate plugins
+
+Add a comment to the plugin's sample config; include the deprecation version
+and any replacement.
+
+```toml
+[[inputs.logparser]]
+  ## DEPRECATED: The 'logparser' plugin is deprecated in 1.10. Please use the
+  ## 'tail' plugin with the grok data_format as a replacement.
+```
+
+Add the deprecation warning to the plugin's README:
+
+```markdown
+# Logparser Input Plugin
+
+### **Deprecated in 1.10**: Please use the [tail][] plugin along with the
+`grok` [data format][].
+
+[tail]: /plugins/inputs/tail/README.md
+[data format]: /docs/DATA_FORMATS_INPUT.md
+```
+
+Log a warning message if the plugin is used. If the plugin is a
+ServiceInput, place this in the `Start()` function; for regular Inputs, log it only the first
+time the `Gather` function is called.
+```go
+log.Println("W! [inputs.logparser] The logparser plugin is deprecated in 1.10. " +
+    "Please use the tail plugin with the grok data_format as a replacement.")
+```
+## Deprecate options
+
+Mark the option as deprecated in the sample config; include the deprecation
+version and any replacement.
+
+```toml
+  ## Broker URL
+  ## deprecated in 1.7; use the brokers option
+  # url = "amqp://localhost:5672/influxdb"
+```
+
+In the plugin's configuration struct, mention that the option is deprecated:
+
+```go
+type AMQPConsumer struct {
+    URL string `toml:"url"` // deprecated in 1.7; use brokers
+}
+```
+
+Finally, use the plugin's `Init() error` method to display a log message at warn level. The message should include the offending configuration option and any suggested replacement:
+```go
+func (a *AMQPConsumer) Init() error {
+    if a.URL != "" {
+        a.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option")
+    }
+    return nil
+}
+```
+
+## Deprecate metrics
+
+In the README, document the metric as deprecated. If there is a replacement field,
+tag, or measurement then mention it.
+
+```markdown
+- system
+  - fields:
+    - uptime_format (string, deprecated in 1.10: use `uptime` field)
+```
+
+Add filtering to the sample config and leave it commented out.
+
+```toml
+[[inputs.system]]
+  ## Uncomment to remove deprecated metrics.
+  # fielddrop = ["uptime_format"]
+```
diff --git a/docs/developers/LOGGING.md b/docs/developers/LOGGING.md
new file mode 100644
index 0000000000000..60de15699a6e8
--- /dev/null
+++ b/docs/developers/LOGGING.md
@@ -0,0 +1,75 @@
+# Logging
+
+## Plugin Logging
+
+You can access the Logger for a plugin by defining a field named `Log`. This
+`Logger` is configured internally with the plugin name and alias so they do not
+need to be specified for each log call.
+
+```go
+type MyPlugin struct {
+    Log telegraf.Logger `toml:"-"`
+}
+```
+
+You can then use this Logger in the plugin. Use the method corresponding to
+the log level of the message.
+```go
+p.Log.Errorf("Unable to write to file: %v", err)
+```
+
+## Agent Logging
+
+In other sections of the code the log level and module must be added
+manually:
+```go
+log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err)
+```
+
+## When to Log
+
+Log a message if an error occurs but the plugin can continue working. For
+example, if the plugin handles several servers and only one of them has a fatal
+error, it can be logged as an error.
+
+Use logging judiciously for debug purposes. Since Telegraf does not currently
+support setting the log level on a per-module basis, it is especially important
+not to overdo it with debug logging.
+
+If the plugin is listening on a socket, log a message with the address of the socket:
+```go
+p.Log.Infof("Listening on %s://%s", protocol, l.Addr())
+```
+
+## When not to Log
+
+Don't use logging to emit performance data or other metadata about the plugin;
+instead use the `internal` plugin and the `selfstat` package.
+
+Don't log fatal errors in the plugin that require the plugin to return;
+instead return them from the function and Telegraf will handle the logging.
+
+Don't log static configuration errors; check for them in a plugin `Init()`
+function and return an error there.
+
+Don't log a warning every time a plugin is called for situations that are
+normal on some systems.
+
+## Log Level
+
+The log level is indicated by a single character at the start of the log
+message. Adding this prefix is not required when using the Plugin Logger.
+- `D!` Debug
+- `I!` Info
+- `W!` Warning
+- `E!` Error
+
+## Style
+
+Log messages should be capitalized and be a single line.
+
+If it includes data received from another system or process, such as the text
+of an error message, the text should be quoted with `%q`.
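+
+For example (the response value here is hypothetical, shown only to
+illustrate the quoting style):
+```go
+// Quoting makes empty strings and embedded whitespace visible in the log.
+p.Log.Warnf("Server returned unexpected status %q", resp.Status)
+```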
+
+Use the `%v` format for the Go error type instead of `%s` to ensure a nil error
+is printed.
diff --git a/docs/developers/METRIC_FORMAT_CHANGES.md b/docs/developers/METRIC_FORMAT_CHANGES.md
new file mode 100644
index 0000000000000..32bfe0a2db5a7
--- /dev/null
+++ b/docs/developers/METRIC_FORMAT_CHANGES.md
@@ -0,0 +1,42 @@
+# Metric Format Changes
+
+When making changes to an existing input plugin, care must be taken not to change the metric format in ways that will cause trouble for existing users. This document helps developers understand how to make metric format changes safely.
+
+## Changes can cause incompatibilities
+If the metric format changes, data collected in the new format can be incompatible with data in the old format. Database queries designed around the old format may not work with the new format. This can cause application failures.
+
+Some metric format changes don't cause incompatibilities. Also, some unsafe changes are necessary. How do you know which changes are safe, and what should you do if your change isn't safe?
+
+## Guidelines
+The main guideline is simply to keep compatibility in mind when making changes. Developers are often focused on making a change that fixes their particular problem, and they forget that many people use the existing code and will upgrade. When you're coding, keep existing users and applications in mind.
+
+### Renaming, removing, reusing
+Database queries refer to the metric and its tags and fields by name. Any Telegraf code change that changes those names has the potential to break an existing query. Similarly, removing tags or fields can break queries.
+
+Changing the meaning of an existing tag value or field value, or reusing an existing one in a new way, isn't safe. Although queries that use these tags/fields may not break, they will not work as they did before the change.
+
+Adding a field doesn't break existing queries. Queries that select all fields and/or tags (like "select * from") will return an extra series, but this is often useful.
+
+### Performance and storage
+Time series databases can store large amounts of data, but many of them don't perform well on high-cardinality data. If a metric format change includes a new tag that holds high-cardinality data, database performance could be reduced enough to cause existing applications not to work as they previously did. Metric format changes that dramatically increase the number of tags or fields of a metric can increase database storage requirements unexpectedly. Both of these types of changes are unsafe.
+
+### Make unsafe changes opt-in
+If your change has the potential to seriously affect existing users, the change must be opt-in. To do this, add a plugin configuration setting that lets the user select the metric format. Make the setting's default value select the old metric format. When new users add the plugin they can choose the new format and get its benefits. When existing users upgrade, their config files won't have the new setting, so the default will ensure that there is no change.
+
+When adding a setting, avoid using a boolean and consider instead a string or int for future flexibility. A boolean can only handle two formats but a string can handle many. For example, compare `use_new_format=true` and `features=["enable_foo_fields"]`; the latter is much easier to extend and still very descriptive.
+
+If you want to encourage existing users to use the new format, you can log a warning once on startup when the old format is selected.
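+As a rough sketch (the plugin, option, and feature names below are
+hypothetical, not an existing Telegraf plugin), the opt-in pattern could look
+like this:
+
+```go
+type MyPlugin struct {
+    // Opt-in feature flags; the zero value keeps the old metric format.
+    Features []string `toml:"features"`
+
+    Log telegraf.Logger `toml:"-"`
+}
+
+func (m *MyPlugin) Init() error {
+    newFormat := false
+    for _, feature := range m.Features {
+        if feature == "enable_foo_fields" {
+            newFormat = true
+        }
+    }
+    if !newFormat {
+        // Warn once on startup; Gather keeps emitting the old format.
+        m.Log.Warn("Using the old metric format; set features = [\"enable_foo_fields\"] to opt in to the new format")
+    }
+    return nil
+}
+```
+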
+The warning should tell users in a gentle way that they can upgrade to a better metric format. If it doesn't make sense to maintain multiple metric formats forever, you can change the default on a major release or even remove the old format completely. See [Deprecation](DEPRECATION.md) for details.
+
+### Utility
+Changes should be useful to many or most users. A change that is only useful for a small number of users may not be accepted, even if it's off by default.
+
+## Summary table
+
+| | delete | rename | add |
+| ------- | ------ | ------ | --- |
+| metric | unsafe | unsafe | safe |
+| tag | unsafe | unsafe | be careful with cardinality |
+| field | unsafe | unsafe | ok as long as it's useful for existing users and is worth the added space |
+
+## References
+InfluxDB Documentation: "Schema and data layout"
diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md
new file mode 100644
index 0000000000000..000479c94ce42
--- /dev/null
+++ b/docs/developers/PACKAGING.md
@@ -0,0 +1,47 @@
+# Packaging
+
+Building the packages for Telegraf is automated using [Make](https://en.wikipedia.org/wiki/Make_(software)). Just running `make` will build a Telegraf binary for the operating system and architecture you are using (if it is supported). If you need to build a different package, run `make package`, which will build all the supported packages. You will most likely only want a subset; you can define a subset of packages to be built by overriding the `include_packages` variable like so: `make package include_packages="amd64.deb"`. You can also build all packages for a specific architecture like so: `make package include_packages="$(make amd64)"`.
+
+The packaging steps require certain tools to be set up beforehand. These dependencies are listed in the ci-1.17.docker file, which you can find in the scripts directory. Therefore it is recommended to use Docker to build the artifacts; see more details below.
+
+## Go Version
+
+Telegraf will be built using the latest version of Go whenever possible. Incrementing the version is maintained by the core Telegraf team because it requires access to an internal Docker repository that hosts the Docker CI images. When a new version is released, the following process is followed:
+
+1. Within the `Makefile` and `.circleci/config.yml` update the Go versions to the new version number
+2. Run `make ci-<version>` where `<version>` refers to the new Go version number (this requires internal permissions)
+3. The files `scripts/installgo_mac.sh` and `scripts/installgo_windows.sh` need to be updated as well with the new Go version and SHA
+4. Create a pull request with these new changes, and verify the CI passes and uses the new Docker image
+
+## Package using Docker
+
+This packaging method uses the CI images, and is very similar to how the
+official packages are created on release. This is the recommended method for
+building the rpm/deb as it is less system dependent.
+
+Pull the CI images from quay; the version corresponds to the version of Go
+that is used to build the binary:
+```
+docker pull quay.io/influxdb/telegraf-ci:1.17.3
+```
+
+Start a shell in the container:
+```
+docker run -ti quay.io/influxdb/telegraf-ci:1.17.3 /bin/bash
+```
+
+From within the container:
+
+1. `go get -d github.com/influxdata/telegraf`
+2. `cd /go/src/github.com/influxdata/telegraf`
+3. `git checkout release-1.10`
+    * Replace tag `release-1.10` with the version of Telegraf you would like to build
+4. `git reset --hard 1.10.2`
+5. `make deps`
+6. `make package include_packages="amd64.deb"`
+    * Change `include_packages` to change what package you want; run `make help` to see possible values
+
+From the host system, copy the build artifacts out of the container (replace
+`romantic_ptolemy` with your container's name, shown by `docker ps`):
+```
+docker cp romantic_ptolemy:/go/src/github.com/influxdata/telegraf/build/telegraf-1.10.2-1.x86_64.rpm .
+```
diff --git a/docs/developers/PROFILING.md b/docs/developers/PROFILING.md
new file mode 100644
index 0000000000000..81cdf1980304d
--- /dev/null
+++ b/docs/developers/PROFILING.md
@@ -0,0 +1,55 @@
+# Profiling
+This article describes how to collect performance traces and memory profiles
+from Telegraf. If you are submitting this for an issue, please include the
+version.txt generated below.
+
+Use the `--pprof-addr` option to enable the profiler; the easiest way to do
+this may be to add this line to `/etc/default/telegraf`:
+```
+TELEGRAF_OPTS="--pprof-addr localhost:6060"
+```
+
+Restart Telegraf to activate the profile address.
+
+#### Trace Profile
+Collect a trace while the performance issue is occurring. This
+example collects a 10 second trace:
+```
+curl 'http://localhost:6060/debug/pprof/trace?seconds=10' > trace.bin
+telegraf --version > version.txt
+go env GOOS GOARCH >> version.txt
+```
+
+The `trace.bin` and `version.txt` files can be sent in for analysis or, if desired, you can
+analyze the trace with:
+```
+go tool trace trace.bin
+```
+
+#### Memory Profile
+Collect a heap memory profile:
+```
+curl 'http://localhost:6060/debug/pprof/heap' > mem.prof
+telegraf --version > version.txt
+go env GOOS GOARCH >> version.txt
+```
+
+Analyze:
+```
+$ go tool pprof mem.prof
+(pprof) top5
+```
+
+#### CPU Profile
+Collect a 30s CPU profile:
+```
+curl 'http://localhost:6060/debug/pprof/profile' > cpu.prof
+telegraf --version > version.txt
+go env GOOS GOARCH >> version.txt
+```
+
+Analyze:
+```
+$ go tool pprof cpu.prof
+(pprof) top5
+```
diff --git a/docs/developers/README.md b/docs/developers/README.md
new file mode 120000
index 0000000000000..f939e75f21a8b
--- /dev/null
+++ b/docs/developers/README.md
@@ -0,0 +1 @@
+../../CONTRIBUTING.md
\ No newline at end of file
diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md
new file mode 100644
index 0000000000000..49107c03f9da9
--- /dev/null
+++ b/docs/developers/REVIEWS.md
@@ -0,0 +1,179 @@
+# Reviews
+
+Pull-requests require two approvals before being merged. Expect several rounds of back and forth on
+reviews; non-trivial changes are rarely accepted on the first pass. It might take some time
+until you see a first review, so please be patient.
+
+All pull requests should follow the style and best practices in the
+[CONTRIBUTING.md](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md)
+document.
+
+## Process
+
+The review process is roughly structured as follows:
+
+1. Submit a pull request.
+Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code as an employee of your company). Provide a short description of your submission and reference issues that you potentially close. Make sure the CI tests are all green and there are no linter issues.
+1. Get feedback from a first reviewer and a `ready for final review` tag.
+Please constructively work with the reviewer to get your code into a mergeable state (see also [below](#reviewing-plugin-code)).
+1. Get a final review by one of the InfluxData maintainers.
+Please fix any issues raised.
diff --git a/docs/developers/README.md b/docs/developers/README.md
new file mode 120000
index 0000000000000..f939e75f21a8b
--- /dev/null
+++ b/docs/developers/README.md
@@ -0,0 +1 @@
+../../CONTRIBUTING.md
\ No newline at end of file
diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md
new file mode 100644
index 0000000000000..49107c03f9da9
--- /dev/null
+++ b/docs/developers/REVIEWS.md
@@ -0,0 +1,179 @@
+# Reviews
+
+Pull requests require two approvals before being merged. Expect several rounds of back and forth on
+reviews; non-trivial changes are rarely accepted on the first pass. It might take some time
+until you see a first review, so please be patient.
+
+All pull requests should follow the style and best practices in the
+[CONTRIBUTING.md](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md)
+document.
+
+## Process
+
+The review process is roughly structured as follows:
+
+1. Submit a pull request.
+Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and the [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code as an employee of your company). Provide a short description of your submission and reference the issues it potentially closes. Make sure the CI tests are all green and there are no linter issues.
+1. Get feedback from a first reviewer and a `ready for final review` tag.
+Please work constructively with the reviewer to get your code into a mergeable state (see also [below](#reviewing-plugin-code)).
+1. Get a final review by one of the InfluxData maintainers.
+Please fix any issues raised.
+1. Wait for the pull request to be merged.
+It might take some time until your PR gets merged, depending on the release cycle and the type of
+your pull request (bugfix, enhancement of existing code, new plugin, etc.). Remember, it might be necessary to rebase your code before merge to resolve conflicts.
+
+Please read the review comments carefully, fix the related parts of the code, and/or respond in case anything is unclear. If there is no activity in a pull request or the contributor does not respond, we apply the following scheme:
+
+1. We send a first reminder after at least two weeks of inactivity.
+1. After at least another two weeks of inactivity, we send a second reminder and set the `waiting for response` tag.
+1. Another two weeks later, we ask the community for help by setting the `help wanted` label.
+1. In case nobody volunteers to take over the PR within the next 30 days, InfluxData will triage the PR and might close it due to inactivity.
+
+So in case you expect a longer period of inactivity or you want to abandon a pull request, please let us know.
+
+## Reviewing Plugin Code
+
+- Avoid variables scoped to the package. Everything should be scoped to the plugin struct, since multiple instances of the same plugin are allowed and package-level variables will cause race conditions.
+- SampleConfig must match the readme, but not include the plugin name.
+- structs should include toml tags for fields that are expected to be editable from the config, eg `toml:"command"` (snake_case)
+- plugins that want to log should declare the Telegraf logger, not use the log package, eg:
+
+```Go
+    Log telegraf.Logger `toml:"-"`
+```
+
+(in tests, you can do `myPlugin.Log = testutil.Logger{}`)
+
+- Initialization and config checking should be done in the `Init() error` function, not in the Connect, Gather, or Start functions (see the sketch after this list).
+- `Init() error` should not contain connections to external services. If anything fails in Init, Telegraf will consider it a configuration error and refuse to start.
+- plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel.
+- avoid goroutines when you don't need them and removing them would simplify the code
+- errors should almost always be checked.
+- avoid boolean fields when a string or enumerated type would be better for future extension. Lots of boolean fields also make the code difficult to maintain.
+- use config.Duration instead of internal.Duration
+- compose tls.ClientConfig as opposed to specifying all the TLS fields manually
+- http.Client should be declared once in `Init() error` and reused (or better yet, on the package if there's no client-specific configuration). http.Client has built-in concurrency protection and reuses connections transparently when possible.
+- avoid doing network calls in loops where possible, as this has a large performance cost. This isn't always possible to avoid.
+- when processing batches of records with multiple network requests (some outputs that need to partition writes do this), return an error when you want the whole batch to be retried, and log the error when you want the batch to continue without the record
+- consider using the StreamingProcessor interface instead of the (legacy) Processor interface
+- avoid network calls in processors when at all possible. If it's necessary, it's possible, but complicated (see processor.reversedns).
+- avoid dependencies when:
+  - they require cgo
+  - they pull in massive projects instead of small libraries
+  - they could be replaced by a simple http call
+  - they seem unnecessary, superfluous, or gratuitous
+- consider adding build tags if plugins have OS-specific considerations
+- use the right logger log levels so that Telegraf is normally quiet, eg `plugin.Log.Debugf()` only shows up when running Telegraf with `--debug`
+- consistent field types: dynamically setting the type of a field should be strongly avoided, as it causes problems that are difficult to solve later, made worse by having to worry about backwards compatibility in future changes. For example, if a numeric value comes from a string field and it is not clear if the field can sometimes be a float, the author should pick either a float or an int, and parse that field consistently every time. It is better to sometimes truncate a float, or to always store ints as floats, than to change the field type, which causes downstream problems with output databases.
+- backwards compatibility: We work hard not to break existing configurations during new changes. Upgrading Telegraf should be a seamless transition. Possible tools to make this transition smooth are:
+  - enumerable type fields that allow you to customize behavior (avoid boolean feature flags)
+  - version fields that can be used to opt in to newer changed behavior without breaking old (see inputs.mysql for an example)
+  - a new version of the plugin if it has changed significantly (eg outputs.influxdb and outputs.influxdb_v2)
+  - Logger and README deprecation warnings
+  - changing the default value of a field can be okay, but will affect users who have not specified the field and should be approached cautiously.
+  - The general rule here is "don't surprise me": users should not be caught off-guard by unexpected or breaking changes.
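+
+A minimal, hypothetical plugin skeleton tying several of these points together
+(struct-scoped state, snake_case toml tags, the injected logger, and Init-time
+validation); `Example` and `Command` are placeholder names:
+
+```go
+package example
+
+import (
+    "fmt"
+
+    "github.com/influxdata/telegraf"
+)
+
+// Example is a hypothetical input plugin; all state is scoped to the struct,
+// never to the package.
+type Example struct {
+    Command string          `toml:"command"` // editable from the config
+    Log     telegraf.Logger `toml:"-"`       // injected by Telegraf
+}
+
+// Init only validates the configuration; it must not connect to external
+// services. A failure here is treated as a configuration error.
+func (e *Example) Init() error {
+    if e.Command == "" {
+        return fmt.Errorf("command must not be empty")
+    }
+    return nil
+}
+
+// Gather collects metrics; connections belong here (or in Start for service
+// plugins), not in Init.
+func (e *Example) Gather(acc telegraf.Accumulator) error {
+    e.Log.Debugf("running %q", e.Command) // only visible with --debug
+    return nil
+}
+```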
+
+## Linting
+
+Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing, you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme.
+
+## Testing
+
+Sufficient unit tests must be created. New plugins must always contain
+some unit tests. Bug fixes and enhancements should include new tests, but
+they can be accepted without them if the reviewer thinks adding them would
+not be worth the effort.
+
+[Table Driven Tests](https://github.com/golang/go/wiki/TableDrivenTests) are
+encouraged to reduce boilerplate in unit tests.
+
+The [stretchr/testify](https://github.com/stretchr/testify) library should be
+used for assertions within the tests when possible, with preference towards
+github.com/stretchr/testify/require.
+
+Primarily use the require package to avoid cascading errors:
+
+```go
+assert.Equal(t, lhs, rhs) // avoid
+require.Equal(t, lhs, rhs) // good
+```
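+
+A short table-driven sketch combining both recommendations; `parse` is a
+hypothetical stand-in for whatever function is under test:
+
+```go
+package example
+
+import (
+    "strconv"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+)
+
+// parse is a placeholder for the function under test.
+func parse(s string) (int, error) { return strconv.Atoi(s) }
+
+func TestParse(t *testing.T) {
+    tests := []struct {
+        name     string
+        input    string
+        expected int
+    }{
+        {name: "zero", input: "0", expected: 0},
+        {name: "positive", input: "42", expected: 42},
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            actual, err := parse(tt.input)
+            require.NoError(t, err) // require stops this subtest on failure
+            require.Equal(t, tt.expected, actual)
+        })
+    }
+}
+```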
+
+## Configuration
+
+The config file is the primary interface and should be carefully scrutinized.
+
+Ensure the [[SampleConfig]] and
+[README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md)
+match the current standards.
+
+READMEs should:
+
+- use spaces, not tabs
+- be indented consistently, matching other READMEs
+- have two `#` for comments
+- have one `#` for defaults, which should always match the default value of the plugin
+- include all appropriate types as a list for enumerable field types
+- include a useful example, avoiding "example", "test", etc.
+- include tips for any common problems
+- include example output from the plugin, if input/processor/aggregator/parser/serializer
+
+## Metric Schema
+
+Telegraf metrics are heavily based on InfluxDB points, but have some
+extensions to support other outputs and metadata.
+
+New metrics must follow the recommended
+[schema design](https://docs.influxdata.com/influxdb/latest/concepts/schema_and_data_layout/).
+Each metric should be evaluated for _series cardinality_, proper use of tags vs
+fields, and should use existing patterns for encoding metrics.
+
+Metrics use `snake_case` naming style.
+
+### Enumerations
+
+Generally enumeration data should be encoded as a tag. In some cases it may
+be desirable to also include the data as an integer field:
+
+```shell
+net_response,result=success result_code=0i
+```
+
+### Histograms
+
+Use the `le` tag for each range, and `+Inf` for the values out of
+range. This format is inspired by the Prometheus project:
+
+```shell
+cpu,le=0.0 usage_idle_bucket=0i 1486998330000000000
+cpu,le=50.0 usage_idle_bucket=2i 1486998330000000000
+cpu,le=100.0 usage_idle_bucket=2i 1486998330000000000
+cpu,le=+Inf usage_idle_bucket=2i 1486998330000000000
+```
+
+### Lists
+
+Lists are tricky, but the general technique is to encode the list using a tag,
+creating one series per item in the list.
+
+### Counters
+
+Counters retrieved from other projects often come in one of two styles:
+monotonically increasing without reset, or reset on each interval. No attempt
+should be made to switch between these two styles, but if given the option it
+is preferred to use the non-resetting variant. This style is more resilient in
+the face of downtime and does not contain a fixed time element.
+
+## Go Best Practices
+
+In general, code should follow the best practices described in [Code Review
+Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+
+### Networking
+
+All network operations should have appropriate timeouts. The ability to
+cancel the operation, preferably using a context, is desirable but not always
+worth the implementation complexity.
+
+### Channels
+
+Channels should be used judiciously, as they often complicate the design and
+can easily be used improperly. Only use them when they are needed.
diff --git a/docs/developers/SAMPLE_CONFIG.md b/docs/developers/SAMPLE_CONFIG.md
new file mode 100644
index 0000000000000..d0969212fecb2
--- /dev/null
+++ b/docs/developers/SAMPLE_CONFIG.md
@@ -0,0 +1,76 @@
+# Sample Configuration
+
+The sample config file is generated from the results of the `SampleConfig()` and
+`Description()` functions of the plugins.
+
+You can generate a full sample config:
+```
+telegraf config
+```
+
+You can also generate the config for a particular plugin using the `-usage`
+option:
+```
+telegraf --usage influxdb
+```
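+
+As a point of reference, a minimal, hypothetical plugin implementation of the
+two functions named above could look like this (`Example` and `exchange_type`
+are placeholder names):
+
+```go
+package example
+
+type Example struct {
+    ExchangeType string `toml:"exchange_type"`
+}
+
+// Description returns the one-line summary shown above the plugin's section.
+func (e *Example) Description() string {
+    return "A hypothetical example plugin"
+}
+
+// SampleConfig returns the config section rendered by `telegraf config`.
+func (e *Example) SampleConfig() string {
+    return `
+  ## This text describes what the exchange_type option does.
+  # exchange_type = "topic"
+`
+}
+```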
+
+## Style
+
+In the config file we use 2-space indentation. Since the config is
+[TOML](https://github.com/toml-lang/toml), the indentation has no meaning.
+
+Documentation is double commented, written in full sentences, and ends with a period.
+```toml
+  ## This text describes what the exchange_type option does.
+  # exchange_type = "topic"
+```
+
+Try to give every parameter a default value whenever possible. If a
+parameter does not have a default or must frequently be changed, then leave it
+uncommented.
+```toml
+  ## Brokers are the AMQP brokers to connect to.
+  brokers = ["amqp://localhost:5672"]
+```
+
+Options where the default value is usually sufficient are normally commented
+out. The commented out value is the default.
+```toml
+  ## What an exchange type is.
+  # exchange_type = "topic"
+```
+
+If you want to show an example of a possible setting filled out that is
+different from the default, show both:
+```toml
+  ## Static routing key. Used when no routing_tag is set or as a fallback
+  ## when the tag specified in routing_tag is not found.
+  ## example: routing_key = "telegraf"
+  # routing_key = ""
+```
+
+Unless parameters are closely related, add a space between them. Usually,
+closely related parameters share a single description.
+```toml
+  ## If true, queue will be declared as an exclusive queue.
+  # queue_exclusive = false
+
+  ## If true, queue will be declared as an auto deleted queue.
+  # queue_auto_delete = false
+
+  ## Authentication credentials for the PLAIN auth_method.
+  # username = ""
+  # password = ""
+```
+
+Parameters should usually be describable in a few sentences. If it takes
+much more than this, try to provide a shorter explanation and provide a more
+complex description in the Configuration section of the plugin's
+[README](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/example).
+
+Boolean parameters should be used judiciously. Try to think of something
+better, since they don't scale well: things are often not truly boolean,
+and boolean options frequently end up with implicit dependencies (this
+option does something only if these other options are also set).
diff --git a/docs/maintainers/CHANGELOG.md b/docs/maintainers/CHANGELOG.md
new file mode 100644
index 0000000000000..8935ad70ca74e
--- /dev/null
+++ b/docs/maintainers/CHANGELOG.md
@@ -0,0 +1,43 @@
+# Changelog
+
+The changelog contains the list of changes by version, in addition to release
+notes. The file is updated immediately after adding a change that impacts
+users. Changes that don't affect the functionality of Telegraf, such as
+refactoring code, are not included.
+
+The changelog entries are added by a maintainer after merging a pull request.
+We experimented with requiring the pull request contributor to add the entry,
+which had a nice side-effect of reducing the number of changelog-only commits
+in the log history; however, this had several drawbacks:
+
+- The entry often needed rewording.
+- Entries frequently caused merge conflicts.
+- It required the contributor to know which version a change was accepted into.
+- Merge conflicts made it more time consuming to backport changes.
+
+Changes are added only under the first version they are released in. For
+example, a change backported to 1.7.2 would only appear under 1.7.2 and not in
+1.8.0. This may become confusing if we begin supporting more than one
+previous version, but works well for now.
+
+## Updating
+
+If the change resulted in a deprecation, mention the deprecation in the Release
+Notes section of the version. In general, all changes that require or
+recommend the user to perform an action when upgrading should be mentioned in
+the release notes.
+
+If a new plugin has been added, include it in a section based on the type of
+the plugin.
+
+All user facing changes, including those already mentioned in the release
+notes or new plugin sections, should be added to either the Features or
+Bugfixes section.
+
+Features should generally link to the pull request, since this describes the
+actual implementation. Bug fixes should link to the issue instead of the pull
+request, since this describes the problem. If a bug has been fixed but does not
+have an issue, then it is okay to link to the pull request.
+
+It is usually okay to just use the shortlog commit message, but if needed
+it can differ or be further clarified in the changelog.
diff --git a/docs/maintainers/LABELS.md b/docs/maintainers/LABELS.md
new file mode 100644
index 0000000000000..1ee6cc7517c74
--- /dev/null
+++ b/docs/maintainers/LABELS.md
@@ -0,0 +1,72 @@
+# Labels
+
+This page describes the meaning of the various
+[labels](https://github.com/influxdata/telegraf/labels) we use on the Github
+issue tracker.
+
+## Categories
+
+New issues are automatically labeled `feature request`, `bug`, or `support`.
+If you are unsure what problem the author is describing, you can use the `need more info` label,
+and if the issue duplicates an existing one you can add the `closed/duplicate` label and close the
+new issue.
+
+New pull requests are usually labeled one of `enhancement`, `bugfix` or `new
+plugin`.
+
+## Additional Labels
+
+Apply any of the `area/*` labels that match. If an area doesn't exist, new
+ones can be added, but **it is not a goal to have an area for all issues.**
+
+If the issue only applies to one platform, you can use a `platform/*` label.
+These are only applied to single-platform issues which are not on Linux.
+
+For bugs you may want to add `panic`, `regression`, or `upstream` to provide
+further detail.
+
+Summary of Labels:
+
+| Label | Description | Purpose |
+| --- | ----------- | --- |
+| `area/*` | Each corresponds to a plugin or group of plugins; add them to identify the affected plugin or group of plugins | categorization |
+| `breaking change` | Improvement to Telegraf that requires breaking changes to the plugin or agent; for minor/major releases | triage |
+| `bug` | New issue for an existing component of Telegraf | triage |
+| `cloud` | Issues or requests around cloud environments | categorization |
+| `dependencies` | Pull requests that update a dependency file | triage |
+| `discussion` | Issues open for discussion | community/categorization |
+| `documentation` | Issues related to Telegraf documentation and configuration descriptions | categorization |
+| `error handling` | Issues related to error handling | categorization |
+| `external plugin` | Plugins that would be ideal as external plugins, which expedites being able to use them with Telegraf | categorization |
+| `good first issue` | A smaller issue suited for getting started with Telegraf, Golang, and contributing to OSS | community |
+| `help wanted` | Request for community participation, code, contribution | community |
+| `need more info` | Issue triaged but outstanding questions remain | community |
+| `performance` | Issues or PRs that address performance issues | categorization |
+| `platform/*` | Issues that only apply to one platform | categorization |
+| `plugin/*` | 1. Requests for new `*` plugins 2. Issues/PRs that are related to `*` plugins | categorization |
+| `ready for final review` | Pull request has been reviewed and/or tested by multiple users and is ready for a final review | triage |
+| `rfc` | Request for comment - larger topics of discussion that are looking for feedback | community |
+| `support` | Telegraf questions; may be directed to the community site or Slack | triage |
+| `upstream` | Bugs or issues that rely on dependency fixes and that we cannot fix independently | triage |
+| `waiting for response` | Waiting for a response from the contributor | community/triage |
+| `wip` | PR still a Work In Progress, not ready for detailed review | triage |
+
+Labels starting with `pm` are not applied by maintainers.
+
+## Closing Issues
+
+We close issues for the following reasons:
+
+| Label | Reason |
+| --- | ----------- |
+| `closed/as-designed` | Used when closing an issue or PR, with a short description of why it was closed |
+| `closed/duplicate` | This issue or pull request already exists |
+| `closed/external-candidate` | The feature request is best implemented by an external plugin |
+| `closed/external-issue` | The feature request is best implemented by an external plugin |
+| `closed/needs more info` | Did not receive the information we need within 3 months of the last activity on the issue |
+| `closed/not-reproducible` | Given the information we have, we can't reproduce the issue |
+| `closed/out-of-scope` | The feature request is out of scope for Telegraf - highly unlikely to be worked on |
+| `closed/question` | This issue is a support question; directed to the community site or Slack |
diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md
new file mode 100644
index 0000000000000..90c49fd5af689
--- /dev/null
+++ b/docs/maintainers/PULL_REQUESTS.md
@@ -0,0 +1,69 @@
+# Pull Requests
+
+## Before Review
+
+Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The
+only exemption would be non-copyrightable changes such as fixing a typo.
+
+Check that all tests are passing. Due to intermittent errors in the CI tests,
+it may be necessary to check the cause of test failures, restart failed
+tests, and/or create new issues to fix intermittent test failures.
+
+Ensure that the PR is opened against the master branch, as all changes are merged
+to master initially. It is possible to change the branch a pull request is
+opened against, but it often results in many conflicts; change it before
+reviewing and then, if needed, ask the contributor to rebase.
+
+Ensure there are no merge conflicts. If there are conflicts, ask the
+contributor to merge or rebase.
+
+## Review
+
+[Review the pull request](https://github.com/influxdata/telegraf/blob/master/docs/developers/REVIEWS.md).
+
+## Merge
+
+Determine what release the change will be applied to. New features should
+be added only to master, and will be released in the next minor version (1.x).
+Bug fixes can be backported to the current release branch to go out with the
+next patch release (1.7.x), unless the bug is too risky to backport or there is
+an easy workaround. Set the correct milestone on the pull request and any
+associated issue.
+
+All pull requests are merged using the "Squash and Merge" strategy on Github.
+This method is used because many pull requests do not have a clean change
+history; it allows us to normalize commit messages and also
+simplifies backporting.
+
+### Rewriting the commit message
+
+After selecting "Squash and Merge" you may need to rewrite the commit message.
+Usually the body of the commit message should be cleared as well, unless it
+is well written and applies to the entire changeset.
+
+- Use imperative present tense for the first line of the message:
+  - Use "Add tests for" (instead of "I added tests for" or "Adding tests for")
+- The default merge commit message includes the PR number at the end of the
+commit message; keep this in the final message.
+- If applicable, mention the plugin in the message.
+
+**Example Enhancement:**
+
+> Add user tag to procstat input (#4386)
+
+**Example Bug Fix:**
+
+> Fix output format of printer processor (#4417)
+
+## After Merge
+
+[Update the Changelog](https://github.com/influxdata/telegraf/blob/master/docs/maintainers/CHANGELOG.md).
+
+If required, backport the patch and the changelog update to the current
+release branch. Usually this can be done by cherry-picking the commits:
+```
+git cherry-pick -x aaaaaaaa bbbbbbbb
+```
+
+Backporting changes to the changelog often pulls in unwanted changes. After
+cherry-picking commits, double check that only the expected lines are
+modified and, if needed, clean up the changelog and amend the change. Push the
+new master and release branch to Github.
diff --git a/docs/maintainers/RELEASES.md b/docs/maintainers/RELEASES.md
new file mode 100644
index 0000000000000..3c05cdf968715
--- /dev/null
+++ b/docs/maintainers/RELEASES.md
@@ -0,0 +1,97 @@
+# Releases
+
+## Release Branch
+
+On master, update `etc/telegraf.conf` and commit:
+```sh
+./telegraf config > etc/telegraf.conf
+```
+
+Create the new release branch:
+```sh
+git checkout -b release-1.15
+```
+
+Push the changes:
+```sh
+git push origin release-1.15 master
+```
+
+Update the next version strings on master:
+```sh
+git checkout master
+echo 1.16.0 > build_version.txt
+```
+
+## Release Candidate
+
+Release candidates are created only for new minor releases (ex: 1.15.0). Tags
+are created, but some of the other tasks, such as adding a changelog entry, are
+skipped. Packages are added to the Github release page and posted to the
+community site, but are not posted to package repos or Docker Hub.
+```sh
+git checkout release-1.15
+git commit --allow-empty -m "Telegraf 1.15.0-rc1"
+git tag -s v1.15.0-rc1 -m "Telegraf 1.15.0-rc1"
+git push origin release-1.15 v1.15.0-rc1
+```
+
+## Release
+
+On master, set the release date in the changelog and cherry-pick the change
+back:
+```sh
+git checkout master
+vi CHANGELOG.md
+git commit -m "Set 1.8.0 release date"
+git checkout release-1.8
+git cherry-pick -x <commit>
+```
+
+Double check that the changelog was applied as desired, or fix it up and
+amend the change before pushing.
+
+Tag the release:
+```sh
+git checkout release-1.8
+# This just improves the `git show 1.8.0` output
+git commit --allow-empty -m "Telegraf 1.8.0"
+git tag -s v1.8.0 -m "Telegraf 1.8.0"
+```
+
+Check that the version was set correctly; the tag can always be altered if a
+mistake is made, but only before you push it to Github:
+```sh
+make
+./telegraf --version
+Telegraf v1.8.0 (git: release-1.8 aaaaaaaa)
+```
+
+When you push a branch with a tag to Github, CircleCI will be triggered to
+build the packages.
+```sh
+git push origin master release-1.8 v1.8.0
+```
+
+Set the release notes on Github.
+
+Update the webpage download links.
+
+Update the apt and yum repositories hosted at repos.influxdata.com.
+
+Update the package signatures on S3, these are used primarily by the docker images.
+ +Update docker image [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker): +```sh +cd influxdata-docker +git co master +git pull +git co -b telegraf-1.8.0 +telegraf/1.8/Dockerfile +telegraf/1.8/alpine/Dockerfile +git commit -am "telegraf 1.8.0" +``` + +Official company post to RSS/community. + +Update documentation on docs.influxdata.com diff --git a/etc/telegraf.conf b/etc/telegraf.conf index a07e922c3aeed..c6e35887ec907 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -90,12 +90,15 @@ ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false - ############################################################################### # OUTPUT PLUGINS # ############################################################################### @@ -171,7 +174,7 @@ ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -306,12 +309,36 @@ # ## Context Tag Sources add Application Insights context tags to a tag value. # ## # ## For list of allowed context tag keys see: -# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # # [outputs.application_insights.context_tag_sources] # # "ai.cloud.role" = "kubernetes_container_name" # # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## Azure Data Exlorer cluster endpoint +# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# + + # # Send aggregate metrics to Azure Monitor # [[outputs.azure_monitor]] # ## Timeout for HTTP writes. 
@@ -401,16 +428,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -435,10 +465,70 @@ # # high_resolution_metrics = false +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + # # Configuration for CrateDB to send metrics to. # [[outputs.cratedb]] -# # A github.com/jackc/pgx connection string. 
-# # See https://godoc.org/github.com/jackc/pgx#ParseDSN +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig # url = "postgres://user:password@localhost/schema?sslmode=disable" # # Timeout for all CrateDB queries. # timeout = "5s" @@ -446,6 +536,8 @@ # table = "metrics" # # If true, and the metrics table does not exist, create it automatically. # table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" # # Configuration for DataDog API to send metrics to. @@ -458,6 +550,9 @@ # # ## Write URL override; useful for debugging. # # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) +# # http_proxy_url = "http://localhost:8888" # # Send metrics to nowhere at all @@ -482,8 +577,8 @@ # ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. # api_token = "" # -# ## Optional prefix for metric names (e.g.: "telegraf.") -# prefix = "telegraf." +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -496,6 +591,13 @@ # # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" # # Configuration for Elasticsearch to send metrics to. @@ -509,6 +611,8 @@ # ## Set to true to ask Elasticsearch a list of all cluster nodes, # ## thus it is not necessary to list all nodes in the urls config option. # enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" @@ -627,6 +731,11 @@ # ## Enable Graphite tags support # # graphite_tag_support = false # +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# # ## Character for separating metric name and field for Graphite tags # # graphite_separator = "." # @@ -730,6 +839,15 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# # ## Data format to output. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -744,6 +862,11 @@ # # [outputs.http.headers] # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. 
+# ## Zero means no limit. +# # idle_conn_timeout = 0 # # Configuration for sending metrics to InfluxDB @@ -883,14 +1006,19 @@ # ## routing_key = "telegraf" # # routing_key = "" # -# ## CompressionCodec represents the various compression codecs recognized by +# ## Compression codec represents the various compression codecs recognized by # ## Kafka in messages. -# ## 0 : No compression -# ## 1 : Gzip compression -# ## 2 : Snappy compression -# ## 3 : LZ4 compression +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD # # compression_codec = 0 # +# ## Idempotent Writes +# ## If enabled, exactly one copy of each message is written. +# # idempotent_writes = false +# # ## RequiredAcks is used in Produce Requests to tell the broker how many # ## replica acknowledgements it must see before responding # ## 0 : the producer never waits for an acknowledgement from the broker. @@ -916,7 +1044,6 @@ # # max_message_bytes = 1000000 # # ## Optional TLS Config -# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -927,6 +1054,23 @@ # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # @@ -944,16 +1088,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -1023,6 +1170,50 @@ # +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. +# # url = "https://listener.logz.io:8071" + + +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. 
+# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] # servers = ["localhost:1883"] # required. @@ -1063,6 +1254,12 @@ # ## actually reads it # # retain = false # +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1075,6 +1272,9 @@ # ## URLs of NATS servers # servers = ["nats://localhost:4222"] # +# ## Optional client name +# # name = "" +# # ## Optional credentials # # username = "" # # password = "" @@ -1112,6 +1312,14 @@ # # ## Timeout for writes to the New Relic API. # # timeout = "15s" +# +# ## HTTP Proxy override. If unset use values from the standard +# ## proxy environment variables to determine proxy, if any. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" # # Send telegraf measurements to NSQD @@ -1128,6 +1336,41 @@ # data_format = "influx" +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + # # Configuration for OpenTSDB server to send metrics to # [[outputs.opentsdb]] # ## prefix for metrics keys @@ -1165,7 +1408,7 @@ # ## Prometheus format. When using the prometheus input, use the same value in # ## both plugins to ensure metrics are round-tripped without modification. 
# ## -# ## example: metric_version = 1; deprecated in 1.13 +# ## example: metric_version = 1; # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -1247,6 +1490,116 @@ # separator = " " +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding backend API path +# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). +# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the correspeonding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. For more information +# ## on Sensu events and its components, please visit: +# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events +# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks +# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities +# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics +# ## +# ## Check specification +# ## The check name is the name to give the Sensu check associated with the event +# ## created. This maps to check.metatadata.name in the event. +# [outputs.sensu.check] +# name = "telegraf" +# +# ## Entity specification +# ## Configure the entity name and namespace, if necessary. This will be part of +# ## the entity.metadata in the event. 
+# ## +# ## NOTE: if the output plugin is configured to send events to a +# ## backend_api_url and entity_name is not set, the value returned by +# ## os.Hostname() will be used; if the output plugin is configured to send +# ## events to an agent_api_url, entity_name and entity_namespace are not used. +# # [outputs.sensu.entity] +# # name = "server-01" +# # namespace = "default" +# +# ## Metrics specification +# ## Configure the tags for the metrics that are sent as part of the Sensu event +# # [outputs.sensu.tags] +# # source = "telegraf" +# +# ## Configure the handler(s) for processing the provided metrics +# # [outputs.sensu.metrics] +# # handlers = ["influxdb","elasticsearch"] + + +# # Send metrics and events to SignalFx +# [[outputs.signalfx]] +# ## SignalFx Org Access Token +# access_token = "my-secret-token" +# +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set +# +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + # # Generic socket writer capable of handling multiple socket types. # [[outputs.socket_writer]] # ## URL to connect to @@ -1286,6 +1639,46 @@ # # data_format = "influx" +# # Send metrics to SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. +# ## See the plugin readme for details. +# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + # # Configuration for Google Cloud Stackdriver to send metrics to # [[outputs.stackdriver]] # ## GCP Project @@ -1334,7 +1727,7 @@ # ## Bear in mind that in some serializer a metric even though serialized to multiple # ## lines cannot be split any further so setting this very low might not work # ## as expected. -# # max_request_body_size = 1_000_000 +# # max_request_body_size = 1000000 # # ## Additional, Sumo specific options. # ## Full list can be found here: @@ -1435,6 +1828,121 @@ # # default_appname = "Telegraf" +# # Configuration for Amazon Timestream output. 
+# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. +# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## The mapping mode specifies how Telegraf records are represented in Timestream. +# ## Valid values are: single-table, multi-table. +# ## For example, consider the following data in line protocol format: +# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 +# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 +# ## where weather and airquality are the measurement names, location and season are tags, +# ## and temperature, humidity, no2, pm25 are fields. 
+# ## In multi-table mode: +# ## - first line will be ingested to table named weather +# ## - second line will be ingested to table named airquality +# ## - the tags will be represented as dimensions +# ## - first table (weather) will have two records: +# ## one with measurement name equals to temperature, +# ## another with measurement name equals to humidity +# ## - second table (airquality) will have two records: +# ## one with measurement name equals to no2, +# ## another with measurement name equals to pm25 +# ## - the Timestream tables from the example will look like this: +# ## TABLE "weather": +# ## time | location | season | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 +# ## TABLE "airquality": +# ## time | location | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-west | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | pm25 | 16 +# ## In single-table mode: +# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) +# ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) +# ## - location and season will be represented as dimensions +# ## - temperature, humidity, no2, pm25 will be represented as measurement name +# ## - the Timestream table from the example will look like this: +# ## Assuming: +# ## - single_table_name = "my_readings" +# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# ## TABLE "my_readings": +# ## time | location | season | namespace | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 +# ## In most cases, using multi-table mapping mode is recommended. +# ## However, you can consider using single-table in situations when you have thousands of measurement names. +# mapping_mode = "multi-table" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Specifies the Timestream table where the metrics will be uploaded. +# # single_table_name = "yourTableNameHere" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Describes what will be the Timestream dimension name for the Telegraf +# ## measurement name. +# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# +# ## Specifies if the plugin should create the table, if the table do not exist. +# ## The plugin writes the data without prior checking if the table exists. +# ## When the table does not exist, the error returned from Timestream will cause +# ## the plugin to create the table, if this parameter is set to true. +# create_table_if_not_exists = true +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table magnetic store retention period in days. +# ## Check Timestream documentation for more details. +# create_table_magnetic_store_retention_period_in_days = 365 +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table memory store retention period in hours. +# ## Check Timestream documentation for more details. 
+# create_table_memory_store_retention_period_in_hours = 24 +# +# ## Only valid and optional if create_table_if_not_exists = true +# ## Specifies the Timestream table tags. +# ## Check Timestream documentation for more details +# # create_table_tags = { "foo" = "bar", "environment" = "dev"} + + # # Write metrics to Warp 10 # [[outputs.warp10]] # # Prefix to add to the measurement. @@ -1509,6 +2017,12 @@ # ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. # #truncate_tags = false # +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. +# #immediate_flush = true +# # ## Define a mapping, namespaced by metric prefix, from string values to numeric values # ## deprecated in 1.9; use the enum processor plugin # #[[outputs.wavefront.string_to_number.elasticsearch]] @@ -1517,11 +2031,103 @@ # # red = 0.0 +# # Generic WebSocket output writer. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:8080/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## Additional HTTP Upgrade headers +# # [outputs.websocket.headers] +# # Authorization = "Bearer " + + +# # Send aggregated metrics to Yandex.Cloud Monitoring +# [[outputs.yandex_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" +# +# ## All user metrics should be sent with "custom" service specified. Normally should not be changed +# # service = "custom" + + ############################################################################### # PROCESSOR PLUGINS # ############################################################################### +# # Attach AWS EC2 metadata to metrics +# [[processors.aws_ec2]] +# ## Instance identity document tags to attach to metrics. +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +# ## +# ## Available tags: +# ## * accountId +# ## * architecture +# ## * availabilityZone +# ## * billingProducts +# ## * imageId +# ## * instanceId +# ## * instanceType +# ## * kernelId +# ## * pendingTime +# ## * privateIp +# ## * ramdiskId +# ## * region +# ## * version +# imds_tags = [] +# +# ## EC2 instance tags retrieved with DescribeTags action. +# ## In case tag is empty upon retrieval it's omitted when tagging metrics. 
+# ## Note that in order for this to work, role attached to EC2 instance or AWS +# ## credentials available from the environment must have a policy attached, that +# ## allows ec2:DescribeTags. +# ## +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html +# ec2_tags = [] +# +# ## Timeout for http requests made by against aws ec2 metadata endpoint. +# timeout = "10s" +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## Keeping the metrics ordered may be slightly slower. +# ordered = false +# +# ## max_parallel_calls is the maximum number of AWS API calls to be in flight +# ## at the same time. +# ## It's probably best to keep this number fairly low. +# max_parallel_calls = 10 + + # # Clone metrics and apply modifications. # [[processors.clone]] # ## All modifications on inputs and aggregators can be overridden: @@ -1616,10 +2222,10 @@ # # Map enum values according to given table. # [[processors.enum]] # [[processors.enum.mapping]] -# ## Name of the field to map +# ## Name of the field to map. Globs accepted. # field = "status" # -# ## Name of the tag to map +# ## Name of the tag to map. Globs accepted. # # tag = "status" # # ## Destination tag or field to be used for the mapped value. By default the @@ -1779,17 +2385,25 @@ # value_key = "value" -# # Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file # [[processors.port_name]] # [[processors.port_name]] # ## Name of tag holding the port number # # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" # -# ## Name of output tag where service name will be added +# ## Name of output tag or field (depending on the source) where service name will be added # # dest = "service" # # ## Default tcp or udp # # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" # # Print all metrics that pass through this filter. @@ -1908,6 +2522,13 @@ # # ## File containing a Starlark script. # # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [processors.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true # # Perform string processing on tags, fields, and measurements @@ -1963,6 +2584,12 @@ # ## Decode a base64 encoded utf-8 string # # [[processors.strings.base64decode]] # # field = "message" +# +# ## Sanitize a string to ensure it is a valid utf-8 string +# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty +# # [[processors.strings.valid_utf8]] +# # field = "message" +# # replacement = "" # # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. 
@@ -2061,6 +2688,49 @@
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]


+# # Calculates a derivative for every field.
+# [[aggregators.derivative]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+# ##
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ##
+# ## This aggregator will estimate a derivative for each field, which is
+# ## contained in both the first and last metric of the aggregation interval.
+# ## Without further configuration the derivative will be calculated with
+# ## respect to the time difference between these two measurements in seconds.
+# ## The formula applied for every field is:
+# ##
+# ##               value_last - value_first
+# ## derivative = --------------------------
+# ##              time_difference_in_seconds
+# ##
+# ## The resulting derivative will be named *fieldname_rate*. The suffix
+# ## "_rate" can be configured by the *suffix* parameter. When using a
+# ## derivation variable you can include its name for more clarity.
+# # suffix = "_rate"
+# ##
+# ## As an abstraction the derivative can be calculated not only by the time
+# ## difference but by the difference of a field, which is contained in the
+# ## measurement. This field is assumed to be monotonically increasing. This
+# ## feature is used by specifying a *variable*.
+# ## Make sure the specified variable is not filtered and exists in the metrics
+# ## passed to this aggregator!
+# # variable = ""
+# ##
+# ## When using a field as the derivation parameter the name of that field will
+# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
+# ##
+# ## Note that the calculation is based on the actual timestamp of the
+# ## measurements. When there is only one measurement during that period, the
+# ## measurement will be rolled over to the next period. The maximum number of
+# ## such roll-overs can be configured with a default of 10.
+# # max_roll_over = 10
+# ##


# # Report the final metric of a series
# [[aggregators.final]]
# ## The period on which to flush & clear the aggregator.
@@ -2124,6 +2794,34 @@
# drop_original = false


+# # Keep the aggregate quantiles of each metric passing through.
+# [[aggregators.quantile]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Quantiles to output in the range [0,1]
+# # quantiles = [0.25, 0.5, 0.75]
+#
+# ## Type of aggregation algorithm
+# ## Supported are:
+# ##  "t-digest" -- approximation using centroids, can cope with a large number of samples
+# ##  "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
+# ##  "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
+# ##  NOTE: Do not use "exact" algorithms with a large number of samples, as
+# ##        this may impair performance and increase memory consumption!
+# # algorithm = "t-digest"
+#
+# ## Compression for approximation (t-digest). The value needs to be
+# ## greater than or equal to 1.0. Smaller values will result in better
+# ## performance but less accuracy.
+# # compression = 100.0


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
@@ -2147,9 +2845,9 @@
 percpu = true
 ## Whether to report total system cpu stats or not
 totalcpu = true
- ## If true, collect raw CPU time metrics.
+ ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false @@ -2270,7 +2968,7 @@ # # disable_query_namespaces = true # default false # # namespaces = ["namespace1", "namespace2"] # -# # Enable set level telmetry +# # Enable set level telemetry # # query_sets = true # default: false # # Add namespace set combinations to limit sets executed on # # Leave blank to do all sets @@ -2283,9 +2981,20 @@ # # by default, aerospike produces a 100 bucket histogram # # this is not great for most graphing tools, this will allow # # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. # # num_histogram_buckets = 100 # default: 10 +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + # # Read Apache status information (mod_status) # [[inputs.apache]] # ## An array of URLs to gather from, must be directed at the machine @@ -2362,7 +3071,7 @@ # ## If not specified, then default is: # bcachePath = "/sys/fs/bcache" # -# ## By default, telegraf gather stats for all bcache devices +# ## By default, Telegraf gather stats for all bcache devices # ## Setting devices will restrict the stats to the specified # ## bcache devices. # bcacheDevs = ["bcache0"] @@ -2378,6 +3087,41 @@ # tubes = ["notifications"] +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read BIND nameserver XML statistics # [[inputs.bind]] # ## An array of BIND XML statistics URI to gather stats. @@ -2385,6 +3129,9 @@ # # urls = ["http://localhost:8053/xml/v3"] # # gather_memory_contexts = false # # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" # # Collect bond interface status, slaves statuses and failures count @@ -2465,7 +3212,14 @@ # ## suffix used to identify socket files # socket_suffix = "asok" # -# ## Ceph user to authenticate as +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. 
client.admin.keyring in /etc/ceph, or the explicit path defined in the
+# ## client section of ceph.conf for example:
+# ##
+# ## [client.telegraf]
+# ## keyring = /etc/ceph/client.telegraf.keyring
+# ##
+# ## Consult the ceph documentation for more detail on keyring generation.
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
@@ -2474,7 +3228,8 @@
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
-# ## Whether to gather statistics via ceph commands
+# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
+# ## to be specified
# gather_cluster_stats = false


@@ -2507,16 +3262,19 @@
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
-# ## 1) Assumed credentials via STS if role_arn is specified
-# ## 2) explicit credentials from 'access_key' and 'secret_key'
-# ## 3) shared profile from 'profile'
-# ## 4) environment variables
-# ## 5) shared credentials file
-# ## 6) EC2 Instance Profile
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
# # access_key = ""
# # secret_key = ""
# # token = ""
# # role_arn = ""
+# # web_identity_token_file = ""
+# # role_session_name = ""
# # profile = ""
# # shared_credential_file = ""
#
@@ -2526,6 +3284,9 @@
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
+# ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
+# # http_proxy_url = "http://localhost:8888"
+#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
@@ -2553,8 +3314,10 @@
# ## Configure the TTL for the internal cache of metrics.
# # cache_ttl = "1h"
#
-# ## Metric Statistic Namespace (required)
-# namespace = "AWS/ELB"
+# ## Metric Statistic Namespaces (required)
+# namespaces = ["AWS/ELB"]
+# # A single metric statistic namespace that will be appended to namespaces on startup
+# # namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -2583,6 +3346,7 @@
# #
# # ## Dimension filters for Metric. All dimensions defined for the metric names
# # ## must be specified in order to retrieve the metric statistics.
+# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = "LoadBalancerName"
# # value = "p-example"
@@ -2642,7 +3406,7 @@
# # tag_delimiter = ":"


-# # Read metrics from one or many couchbase clusters
+# # Read per-node and per-bucket metrics from Couchbase
# [[inputs.couchbase]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
@@ -2654,6 +3418,17 @@
# ## If no protocol is specified, HTTP is used.
# ## If no port is specified, 8091 is used.
# servers = ["http://localhost:8091"]
+#
+# ## Filter bucket fields to include only here.
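+# ## Example (illustrative): keep only memory-related bucket fields:
+# ##   bucket_stats_included = ["mem_used", "quota_percent_used"]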
+# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification (defaults to false)
+# ## If set to false, tls_cert and tls_key are required
+# # insecure_skip_verify = false


# # Read CouchDB Stats from one or more servers
@@ -2667,6 +3442,18 @@
# # basic_password = "p@ssw0rd"


+# # Fetch metrics from a CSGO SRCDS
+# [[inputs.csgo]]
+# ## Specify servers using the following format:
+# ## servers = [
+# ## ["ip1:port1", "rcon_password1"],
+# ## ["ip2:port2", "rcon_password2"],
+# ## ]
+# #
+# ## If no servers are specified, no data will be collected
+# servers = []


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
# ## The DC/OS cluster URL.
@@ -2779,13 +3566,30 @@
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
-# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
-# ## network (eth0, eth1, ...) stats or not
+# ## Whether to report for each container per-device blkio (8:0, 8:1...),
+# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+# ## Default value is 'true' for backwards compatibility; please set it to 'false' so that the 'perdevice_include' setting
+# ## is honored.
# perdevice = true
#
+# ## Specifies for which classes a per-device metric should be issued
+# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+# ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+# # perdevice_include = ["cpu"]
+#
-# ## Whether to report for each container total blkio and network stats or not
+# ## Whether to report for each container total blkio and network stats or not.
+# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+# ## Default value is 'false' for backwards compatibility; please set it to 'true' so that the 'total_include' setting
+# ## is honored.
# total = false
#
+# ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+# ## Possible values are 'cpu', 'blkio' and 'network'
+# ## Total 'cpu' is reported directly by the Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+# ## Please note that this setting has no effect if 'total' is set to 'false'
+# # total_include = ["cpu", "blkio", "network"]
+#
# ## Which environment variables should we use as a tag
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
@@ -2819,6 +3623,40 @@
# filters = [""]


+# # Reads metrics from DPDK applications using v2 telemetry interface.
+# [[inputs.dpdk]]
+# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface.
+# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2"
+#
+# ## Duration that defines how long the connected socket client will wait for a response before terminating the connection.
+# ## This includes both writing to and reading from the socket. Since it's local socket access
+# ## to a fast packet processing application, the timeout should be sufficient for most users.
+# ## Setting the value to 0 disables the timeout (not recommended)
+# # socket_access_timeout = "200ms"
+#
+# ## Enables telemetry data collection for selected device types.
+# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status).
+# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats).
+# # device_types = ["ethdev"]
+#
+# ## List of custom, application-specific telemetry commands to query
+# ## The list of available commands depends on the application deployed. Applications can register their own commands
+# ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands
+# ## For example, for the L3 Forwarding with Power Management Sample Application this could be:
+# ## additional_commands = ["/l3fwd-power/stats"]
+# # additional_commands = []
+#
+# ## Allows turning off collecting data for individual "ethdev" commands.
+# ## Remove "/ethdev/link_status" from list to start getting link status metrics.
+# [inputs.dpdk.ethdev]
+# exclude_commands = ["/ethdev/link_status"]
+#
+# ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify
+# ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host.
+# ## [inputs.dpdk.tags]
+# ## dpdk_instance = "my-fwd-app"


# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
# [[inputs.ecs]]
# ## ECS metadata url.
@@ -2878,6 +3716,7 @@
# cluster_stats_only_from_master = true
#
# ## Indices to collect; can be one or more indices names or _all
+# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
# indices_include = ["_all"]
#
# ## One of "shards", "cluster", "indices"
@@ -2898,21 +3737,108 @@
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
-
-
-# # Returns ethtool statistics for given interfaces
-# [[inputs.ethtool]]
-# ## List of interfaces to pull metrics for
-# # interface_include = ["eth0"]
#
-# ## List of interfaces to ignore when pulling metrics.
-# # interface_exclude = ["eth1"]
+# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them
+# ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices.
+# # num_most_recent_indices = 0


-# # Read metrics from one or more commands that can output to stdout
-# [[inputs.exec]]
-# ## Commands array
-# commands = [
+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster;
+# ## this means that only ONE of the urls will be queried each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+# ## Elasticsearch client timeout, defaults to "5s".
+# # timeout = "5s"
+#
+# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option
+# # enable_sniffer = false
+#
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## This option is only used if enable_sniffer is also set (0s to disable it)
+# # health_check_interval = "10s"
+#
+# ## HTTP basic authentication details (eg. when using x-pack)
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# [[inputs.elasticsearch_query.aggregation]]
+# ## measurement name for the results of the aggregation query
+# measurement_name = "measurement"
+#
+# ## Elasticsearch indexes to query (accept wildcards).
+# index = "index-*"
+#
+# ## The date/time field in the Elasticsearch index (mandatory).
+# date_field = "@timestamp"
+#
+# ## If the field used for the date/time field in Elasticsearch is also using
+# ## a custom date/time format it may be required to provide the format to
+# ## correctly parse the field.
+# ##
+# ## If using one of the built-in Elasticsearch formats this is not required.
+# # date_field_custom_format = ""
+#
+# ## Time window to query (eg. "1m" to query documents from last minute).
+# ## Normally should be set to same as collection interval
+# query_period = "1m"
+#
+# ## Lucene query to filter results
+# # filter_query = "*"
+#
+# ## Fields to aggregate values (must be numeric fields)
+# # metric_fields = ["metric"]
+#
+# ## Aggregation function to use on the metric fields
+# ## Must be set if 'metric_fields' is set
+# ## Valid values are: avg, sum, min, max
+# # metric_function = "avg"
+#
+# ## Fields to be used as tags
+# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag
+# # tags = ["field.keyword", "field2.keyword"]
+#
+# ## Set to true to not ignore documents when the tag(s) above are missing
+# # include_missing_tag = false
+#
+# ## String value of the tag when the tag does not exist
+# ## Used when include_missing_tag is true
+# # missing_tag_value = "null"


+# # Returns ethtool statistics for given interfaces
+# [[inputs.ethtool]]
+# ## List of interfaces to pull metrics for
+# # interface_include = ["eth0"]
+#
+# ## List of interfaces to ignore when pulling metrics.
+# # interface_exclude = ["eth1"]
+#
+# ## Some drivers declare statistics with extra whitespace, different spacing,
+# ## and mixed cases. This list, when enabled, can be used to clean the keys.
+# ## Here are the current possible normalizations:
+# ## * snakecase: converts fooBarBaz to foo_bar_baz
+# ## * trim: removes leading and trailing whitespace
+# ## * lower: changes all capitalized letters to lowercase
+# ## * underscore: replaces spaces with underscores
+# # normalize_keys = ["snakecase", "trim", "lower", "underscore"]


+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+# ## Commands array
+# commands = [
#   "/tmp/test.sh",
#   "/usr/bin/mycollector --foo=bar",
#   "/tmp/collect_*.sh"
@@ -3076,6 +4002,14 @@
#
# ## Timeout for HTTP requests.
# # http_timeout = "5s"
+#
+# ## List of additional fields to query.
+# ## NOTE: Getting those fields might involve issuing additional API-calls, so please
+# ## make sure you do not exceed the rate-limit of GitHub.
+# ##
+# ## Available fields are:
+# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
+# # additional_fields = []


# # Read flattened metrics from one or more GrayLog HTTP endpoints
@@ -3188,6 +4122,15 @@
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
#
+# ## HTTP Proxy support
+# # http_proxy_url = ""
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -3195,6 +4138,15 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal left unset or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
#
@@ -3256,6 +4208,12 @@
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. If they match, the field
+# ## "response_status_code_match" will be 1, otherwise it will be 0. If the
+# ## expected status code is 0, the check is disabled and the field won't be added.
+# # response_status_code = 0
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -3379,12 +4337,30 @@
# timeout = "5s"


+# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization.
+# [[inputs.intel_powerstat]]
+# ## All global metrics are always collected by Intel PowerStat plugin.
+# ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array.
+# ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level
+# ## telemetry will be exposed by Intel PowerStat plugin.
+# ## Supported options:
+# ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"
+# # cpu_metrics = []


# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
# # collect_memstats = true


+# # Monitors internet speed using speedtest.net service
+# [[inputs.internet_speed]]
+# ## Sets whether to run the file download test
+# ## Default: false
+# enable_file_download = false


# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
@@ -3431,6 +4407,18 @@
#
# ## Schema Version: (Optional, defaults to version 1)
# metric_version = 2
+#
+# ## Optionally provide the hex key for the IPMI connection.
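+# ## Example (illustrative value only; a hex-encoded key as expected by
+# ## ipmitool): hex_key = "36313233343536"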
+# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" # # Gather packets and bytes counters from Linux ipsets @@ -3502,11 +4490,16 @@ # ## empty will use default value 10 # # max_subjob_per_layer = 10 # -# ## Jobs to exclude from gathering -# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] +# ## Jobs to include or exclude from gathering +# ## When using both lists, job_exclude has priority. +# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] +# # job_include = [ "*" ] +# # job_exclude = [ ] # -# ## Nodes to exclude from gathering -# # node_exclude = [ "node1", "node2" ] +# ## Nodes to include or exclude from gathering +# ## When using both lists, node_exclude has priority. +# # node_include = [ "*" ] +# # node_exclude = [ ] # # ## Worker pool for jenkins plugin only # ## Empty this field will use default value 5 @@ -3856,6 +4849,13 @@ # # timeout = "5s" +# # Get md array statistics from /proc/mdstat +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + # # Read metrics from one or many memcached servers # [[inputs.memcached]] # ## An array of address to gather stats about. Specify an ip on hostname @@ -3927,7 +4927,7 @@ # [[inputs.modbus]] # ## Connection Configuration # ## -# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or # ## via serial line communication in binary (RTU) or readable (ASCII) encoding # ## # ## Device name @@ -3954,8 +4954,11 @@ # # data_bits = 8 # # parity = "N" # # stop_bits = 1 -# # transmission_mode = "RTU" # +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" # # ## Measurements # ## @@ -3985,7 +4988,8 @@ # ## |---BA, DCBA - Little Endian # ## |---BADC - Mid-Big Endian # ## |---CDAB - Mid-Little Endian -# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) # ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) # ## scale - the final numeric variable representation # ## address - variable address @@ -4012,7 +5016,7 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017"] +# servers = ["mongodb://127.0.0.1:27017?connect=direct"] # # ## When true, collect cluster status # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -4025,6 +5029,10 @@ # ## When true, collect per collection stats # # gather_col_stats = false # +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). 
+# # gather_top_stat = false
+#
# ## List of db where collections stats are collected
# ## If empty, all db are concerned
# # col_stats_dbs = ["local"]
@@ -4128,6 +5136,12 @@
# ## gather metrics from SHOW SLAVE STATUS command output
# # gather_slave_status = false
#
+# ## gather metrics from all channels from SHOW SLAVE STATUS command output
+# # gather_all_slave_channels = false
+#
+# ## use MariaDB dialect for all channels SHOW SLAVE STATUS
+# # mariadb_dialect = false
+#
# ## gather metrics from SHOW BINARY LOGS command output
# # gather_binary_logs = false
#
@@ -4157,6 +5171,13 @@
# # perf_events_statements_limit = 250
# # perf_events_statements_time_limit = 86400
#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
+# # gather_perf_sum_per_acc_per_event = false
+#
+# ## list of events to be gathered for gather_perf_sum_per_acc_per_event
+# ## in case of empty list all events will be gathered
+# # perf_summary_events = []
+#
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# ## example: interval_slow = "30m"
# # interval_slow = ""
@@ -4240,6 +5261,35 @@
# # no configuration


+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[input.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []


# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# # An array of Nginx stub_status URI to gather stats.
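+# # Example (illustrative; 'urls' is the plugin's standard option):
+# # urls = ["http://localhost/server_status"]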
@@ -4405,7 +5455,9 @@

# # Pulls statistics from nvidia GPUs attached to the host
# [[inputs.nvidia_smi]]
-# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# ## Optional: path to nvidia-smi binary, defaults to "/usr/bin/nvidia-smi"
+# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value);
+# ## if it is not found, we will try to locate it on PATH (exec.LookPath), and if it is still not found, an error will be returned
# # bin_path = "/usr/bin/nvidia-smi"
#
# ## Optional: timeout for GPU polling
@@ -4414,9 +5466,8 @@

# # Retrieve data from OPCUA devices
# [[inputs.opcua]]
-# [[inputs.opcua]]
-# ## Device name
-# # name = "localhost"
+# ## Metric name
+# # name = "opcua"
#
# ## OPC UA Endpoint URL
# # endpoint = "opc.tcp://localhost:4840"
@@ -4453,18 +5504,41 @@
# # password = ""
#
# ## Node ID configuration
-# ## name - the variable name
-# ## namespace - integer value 0 thru 3
-# ## identifier_type - s=string, i=numeric, g=guid, b=opaque
-# ## identifier - tag as shown in opcua browser
-# ## data_type - boolean, byte, short, int, uint, uint16, int16,
-# ## uint32, int32, float, double, string, datetime, number
+# ## name - field name to use in the output
+# ## namespace - OPC UA namespace of the node (integer value 0 thru 3)
+# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
+# ## identifier - OPC UA ID (tag as shown in opcua browser)
# ## Example:
-# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
-# nodes = [
-#   {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
-#   {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
-# ]
+# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"}
+# # nodes = [
+# #  {name="", namespace="", identifier_type="", identifier=""},
+# #  {name="", namespace="", identifier_type="", identifier=""},
+# #]
+# #
+# ## Node Group
+# ## Sets defaults for OPC UA namespace and ID type so they aren't required in
+# ## every node. A group can also have a metric name that overrides the main
+# ## plugin metric name.
+# ##
+# ## Multiple node groups are allowed
+# #[[inputs.opcua.group]]
+# ## Group Metric name. Overrides the top level name. If unset, the
+# ## top level name is used.
+# # name =
+# #
+# ## Group default namespace. If a node in the group doesn't set its
+# ## namespace, this is used.
+# # namespace =
+# #
+# ## Group default identifier type. If a node in the group doesn't set its
+# ## identifier type, this is used.
+# # identifier_type =
+# #
+# ## Node ID Configuration. Array of nodes with the same settings as above.
+# # nodes = [
+# #  {name="", namespace="", identifier_type="", identifier=""},
+# #  {name="", namespace="", identifier_type="", identifier=""},
+# #]


# # OpenLDAP cn=Monitor plugin
@@ -4638,6 +5712,9 @@
# ## option of the ping command.
# # interface = ""
#
+# ## Percentiles to calculate. This only works with the native method.
+# # percentiles = [50, 95, 99]
+#
# ## Specify the ping executable binary.
# # binary = "ping"
#
@@ -4648,6 +5725,10 @@
#
# ## Use only IPv6 addresses when resolving a hostname.
# # ipv6 = false
+#
+# ## Number of data bytes to be sent. Corresponds to the "-s"
+# ## option of the ping command. This only works with the native method.
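+# ## Example (illustrative): size = 1472 probes the largest ICMP payload that
+# ## fits in an unfragmented 1500-byte Ethernet frame (20-byte IP header + 8-byte ICMP header).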
+# # size = 56 # # Measure postfix queue statistics @@ -4686,9 +5767,10 @@ # # pattern = "nginx" # ## user as argument for pgrep (ie, pgrep -u ) # # user = "nginx" -# ## Systemd unit name +# ## Systemd unit name, supports globs when include_systemd_children is set to true # # systemd_unit = "nginx.service" -# ## CGroup name or path +# # include_systemd_children = false +# ## CGroup name or path, supports globs # # cgroup = "systemd/system.slice/nginx.service" # # ## Windows service name @@ -4704,6 +5786,9 @@ # ## When true add the full cmdline as a tag. # # cmdline_tag = false # +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# # ## Add the PID as a tag instead of as a field. When collecting multiple # ## processes with otherwise matching tags this setting should be enabled to # ## ensure each process has a unique identity. @@ -4724,6 +5809,8 @@ # ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. # base_url = "https://localhost:8006/api2/json" # api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -4781,6 +5868,12 @@ # ## specified, metrics for all exchanges are gathered. # # exchanges = ["telegraf"] # +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# # ## Queues to include and exclude. Globs accepted. # ## Note that an empty array for both will include all queues # queue_name_include = [] @@ -4802,11 +5895,39 @@ # urls = ["http://localhost:8080/_raindrops"] -# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). -# [[inputs.ras]] -# ## Optional path to RASDaemon sqlite3 database. -# ## Default: /var/lib/rasdaemon/ras-mc_event.db -# # db_path = "" +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on +# url = "https://localhost:8080" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] # # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs @@ -4847,9 +5968,13 @@ # # ## Optional. 
Specify redis commands to retrieve values # # [[inputs.redis.commands]] -# # command = ["get", "sample-key"] -# # field = "sample-key-value" -# # type = "string" +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" # # ## specify server password # # password = "s#cr@t%" @@ -4962,8 +6087,13 @@ # # Retrieves SNMP values from remote agents # [[inputs.snmp]] # ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. +# ## default is udp +# ## port: optional # ## example: agents = ["udp://127.0.0.1:161"] # ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] # agents = ["udp://127.0.0.1:161"] # # ## Timeout for each request. @@ -4988,7 +6118,7 @@ # ## # ## Security Name. # # sec_name = "myuser" -# ## Authentication protocol; one of "MD5", "SHA", or "". +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". # # auth_protocol = "MD5" # ## Authentication password. # # auth_password = "pass" @@ -5123,73 +6253,6 @@ # # password = "pa$$word" -# # Read metrics from Microsoft SQL Server -# [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). -# ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# # servers = [ -# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# # ] -# -# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are -# ## "AzureSQLDB" -# ## "SQLServer" -# ## "AzureSQLManagedInstance" -# # database_type = "AzureSQLDB" -# -# -# ## Optional parameter, setting this to 2 will use a new version -# ## of the collection queries that break compatibility with the original -# ## dashboards. -# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -# query_version = 2 -# -# ## If you are using AzureDB, setting this to true will gather resource utilization metrics -# # azuredb = false -# -# ## Possible queries -# ## Version 2: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - DatabaseIO -# ## - ServerProperties -# ## - MemoryClerk -# ## - Schedulers -# ## - SqlRequests -# ## - VolumeSpace -# ## - Cpu -# -# ## Version 1: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - CPUHistory -# ## - DatabaseIO -# ## - DatabaseSize -# ## - DatabaseStats -# ## - DatabaseProperties -# ## - MemoryClerk -# ## - VolumeSpace -# ## - PerformanceMetrics -# -# -# ## Queries enabled by default for specific Database Type -# ## database_type = AzureSQLDB -# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO -# -# ## A list of queries to include. 
If not specified, all the above listed queries are used. -# # include_query = [] -# -# ## A list of queries to explicitly ignore. -# exclude_query = [ 'Schedulers' , 'SqlRequests'] - - # # Gather timeseries from Google Cloud Platform v3 monitoring API # [[inputs.stackdriver]] # ## GCP Project @@ -5341,6 +6404,13 @@ # ## values are "socket", "target", "device", "mount", "automount", "swap", # ## "timer", "path", "slice" and "scope ": # # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. "a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" # # Reads metrics from a Teamspeak 3 Server via ServerQuery @@ -5488,7 +6558,10 @@ # # Reads metrics from a SSL certificate # [[inputs.x509_cert]] # ## List certificate sources -# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] # # ## Timeout for SSL connection # # timeout = "5s" @@ -5503,7 +6576,7 @@ # # tls_key = "/etc/telegraf/key.pem" -# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets # [[inputs.zfs]] # ## ZFS kstat path. Ignored on FreeBSD # ## If not specified, then default is: @@ -5517,6 +6590,8 @@ # # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] # ## By default, don't gather zpool stats # # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false # # Reads 'mntr' stats from one or many zookeeper servers @@ -5545,30 +6620,130 @@ ############################################################################### -# # Intel Resource Director Technology plugin -# [[inputs.IntelRDT]] -# ## Optionally set sampling interval to Nx100ms. -# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. -# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. -# # sampling_interval = "10" -# -# ## Optionally specify the path to pqos executable. -# ## If not provided, auto discovery will be performed. -# # pqos_path = "/usr/local/bin/pqos" +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" # -# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. -# ## If not provided, default value is false. -# # shortened_metrics = false -# -# ## Specify the list of groups of CPU core(s) to be provided as pqos input. -# ## Mandatory if processes aren't set and forbidden if processes are specified. -# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] -# # cores = ["0-3"] -# -# ## Specify the list of processes for which Metrics will be collected. -# ## Mandatory if cores aren't set and forbidden if cores are specified. -# ## e.g. ["qemu", "pmd"] -# # processes = ["process"] +# ## Address of the KNX-IP interface. 
+# service_address = "localhost:3671"
+#
+# ## Measurement definition(s)
+# # [[inputs.knx_listener.measurement]]
+# # ## Name of the measurement
+# # name = "temperature"
+# # ## Datapoint-Type (DPT) of the KNX messages
+# # dpt = "9.001"
+# # ## List of Group-Addresses (GAs) assigned to the measurement
+# # addresses = ["5/5/1"]
+#
+# # [[inputs.knx_listener.measurement]]
+# # name = "illumination"
+# # dpt = "9.004"
+# # addresses = ["5/5/3"]


+# # Pull Metric Statistics from Aliyun CMS
+# [[inputs.aliyuncms]]
+# ## Aliyun Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Ram RoleArn credential
+# ## 2) AccessKey STS token credential
+# ## 3) AccessKey credential
+# ## 4) Ecs Ram Role credential
+# ## 5) RSA keypair credential
+# ## 6) Environment variables credential
+# ## 7) Instance metadata credential
+#
+# # access_key_id = ""
+# # access_key_secret = ""
+# # access_key_sts_token = ""
+# # role_arn = ""
+# # role_session_name = ""
+# # private_key = ""
+# # public_key_id = ""
+# # role_name = ""
+#
+# ## Specify the ali cloud region list to be queried for metrics and object discovery
+# ## If not set, all supported regions (see below) will be covered; this can put a significant load on the API, so the
+# ## recommendation here is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
+# ## Default supported regions are:
+# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
+# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
+# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
+# ##
+# ## From a discovery perspective this sets the scope for object discovery; the discovered info can be used to enrich
+# ## the metrics with object attributes/tags. Discovery is not supported for all projects (if not supported, then
+# ## it will be reported at startup - for example for the 'acs_cdn' project:
+# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
+# ## Currently, discovery is supported for the following projects:
+# ## - acs_ecs_dashboard
+# ## - acs_rds_dashboard
+# ## - acs_slb_dashboard
+# ## - acs_vpc_eip
+# regions = ["cn-hongkong"]
+#
+# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute, 5 minute, or larger intervals.
+# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Aliyun OpenAPI
+# # and will not be collected by Telegraf.
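+# # For example (illustrative): a metric published only every 5 minutes returns
+# # no datapoints when requested with period = "60s"; use period = "300s" instead.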
+# #
+# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via AliyunCMS API)
+# delay = "1m"
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Metric Statistic Project (required)
+# project = "acs_slb_dashboard"
+#
+# ## Maximum requests per second, default value is 200
+# ratelimit = 200
+#
+# ## How often the discovery API call is executed (default 1m)
+# #discovery_interval = "1m"
+#
+# ## Metrics to Pull (Required)
+# [[inputs.aliyuncms.metrics]]
+# ## Metrics names to be requested,
+# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# names = ["InstanceActiveConnection", "InstanceNewConnection"]
+#
+# ## Dimension filters for Metric (these are optional).
+# ## This allows getting an additional metric dimension. If a dimension is not specified it can be returned or
+# ## the data can be aggregated - it depends on the particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled)
+# ## Values specified here would be added into the list of discovered objects.
+# ## You can specify either a single dimension:
+# #dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is <measurement_tag_name>:<path to discovery data>
+# ## To figure out which fields are available, consult the Describe<Object> API per project.
+# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# #tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+# ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId.
+#
+# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without discovery
+# ## data would be emitted, otherwise dropped. This can help when debugging dimension filters, or with partial coverage
+# ## of discovery scope vs monitoring scope
+# #allow_dps_without_discovery = false


# # AMQP consumer plugin
@@ -5696,12 +6871,16 @@
# ## Define aliases to map telemetry encoding paths to simple measurement names
# [inputs.cisco_telemetry_mdt.aliases]
# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+# ## Define property transformation; please refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for model details.
+# [inputs.cisco_telemetry_mdt.dmes]
+# ModTs = "ignore"
+# CreateTs = "ignore"


# # Read metrics from one or many ClickHouse servers
# [[inputs.clickhouse]]
# ## Username for authorization on ClickHouse server
-# ## example: username = "default""
+# ## example: username = "default"
# username = "default"
#
# ## Password for authorization on ClickHouse server
@@ -5891,6 +7070,46 @@
# data_format = "influx"


+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from.
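+# ## Example (illustrative path): directory = "/var/spool/telegraf/incoming"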
+# directory = "" +# # +# ## The directory to move finished files to. +# finished_directory = "" +# # +# ## The directory to move files to upon file error. +# ## If not provided, erroring files will stay in the monitored directory. +# # error_directory = "" +# # +# ## The amount of time a file is allowed to sit in the directory before it is picked up. +# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, +# ## set this higher so that the plugin will wait until the file is fully copied to the directory. +# # directory_duration_threshold = "50ms" +# # +# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. +# # files_to_monitor = ["^.*\.csv"] +# # +# ## A list of files to ignore, if necessary. Supports regex. +# # files_to_ignore = [".DS_Store"] +# # +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set to the size of the output's metric_buffer_limit. +# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. +# # max_buffered_metrics = 10000 +# # +# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. +# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. +# # file_queue_size = 100000 +# # +# ## The dataformat to be read from the files. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec +# data_format = "influx" + + # # Read logging output from the Docker engine # [[inputs.docker_log]] # ## Docker Endpoint @@ -5937,8 +7156,6 @@ # ## This requires one of the following sets of environment variables to be set: # ## # ## 1) Expected Environment Variables: -# ## - "EVENTHUB_NAMESPACE" -# ## - "EVENTHUB_NAME" # ## - "EVENTHUB_CONNECTION_STRING" # ## # ## 2) Expected Environment Variables: @@ -5947,8 +7164,17 @@ # ## - "EVENTHUB_KEY_NAME" # ## - "EVENTHUB_KEY_VALUE" # +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# # ## Uncommenting the option below will create an Event Hub client based solely on the connection string. # ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) # # connection_string = "" # # ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister @@ -6044,7 +7270,7 @@ # username = "cisco" # password = "cisco" # -# ## gNMI encoding requested (one of: "proto", "json", "json_ietf") +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") # # encoding = "proto" # # ## redial in case of failures after @@ -6137,7 +7363,14 @@ # service_address = ":8080" # # ## Path to listen to. -# # path = "/telegraf" +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. 
+# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false # # ## HTTP methods to accept. # # methods = ["POST", "PUT"] @@ -6148,7 +7381,7 @@ # # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # # max_body_size = "500MB" # # ## Part of the request to consume. Available options are "body" and @@ -6247,6 +7480,36 @@ # # token = "some-long-shared-secret-token" +# # Intel Resource Director Technology plugin +# [[inputs.intel_rdt]] +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false + + # # Read JTI OpenConfig Telemetry from listed sensors # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from @@ -6318,7 +7581,6 @@ # # version = "" # # ## Optional TLS Config -# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -6326,16 +7588,42 @@ # # insecure_skip_verify = false # # ## SASL authentication credentials. These settings should typically be used -# ## with TLS encryption enabled using the "enable_tls" option. +# ## with TLS encryption enabled # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # # ## Name of the consumer group. # # consumer_group = "telegraf_metrics_consumers" # +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# # ## Initial offset position; one of "oldest" or "newest". 
# # offset = "oldest" # @@ -6398,16 +7686,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # # access_key = "" # # secret_key = "" # # token = "" # # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" # # profile = "" # # shared_credential_file = "" # @@ -6439,6 +7730,15 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" # +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# # ## Optional # ## Configuration for a dynamodb checkpoint # [inputs.kinesis_consumer.checkpoint_dynamodb] @@ -6447,6 +7747,30 @@ # table_name = "default" +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + # # Read metrics off Arista LANZ, via socket # [[inputs.lanz]] # ## URL to Arista LANZ endpoint @@ -6512,7 +7836,7 @@ # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # ## Broker URLs for the MQTT server or cluster. To connect to multiple -# ## clusters or standalone servers, use a seperate plugin instance. +# ## clusters or standalone servers, use a separate plugin instance. # ## example: servers = ["tcp://localhost:1883"] # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] @@ -6660,6 +7984,32 @@ # data_format = "influx" +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. 
+# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
+# ##
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Add service certificate and key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"


# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
# ## specify address via a url matching:
@@ -6755,6 +8105,15 @@
# ## The script option can be used to specify the .sql file path.
# ## If script and sqlquery options specified at same time, sqlquery will be used
# ##
+# ## The tagvalue field is used to define custom tags (separated by commas).
+# ## The query is expected to return columns which match the names of the
+# ## defined tags. The values in these columns must be of a string-type,
+# ## a number-type or a blob-type.
+# ##
+# ## The timestamp field is used to override the data points timestamp value. By
+# ## default, all rows are inserted with the current time. By setting a timestamp column,
+# ## the row will be inserted with that column's value.
+# ##
# ## Structure :
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string
@@ -6762,6 +8121,7 @@
# ## withdbname boolean
# ## tagvalue string (comma separated)
# ## measurement string
+# ## timestamp string
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_database"
# version=901
@@ -6785,12 +8145,12 @@
# ## value in both plugins to ensure metrics are round-tripped without
# ## modification.
# ##
-# ## example: metric_version = 1; deprecated in 1.13
+# ## example: metric_version = 1;
# ## metric_version = 2; recommended version
# # metric_version = 1
#
# ## Url tag name (tag containing scrapped url. optional, default is "url")
-# # url_tag = "scrapeUrl"
+# # url_tag = "url"
#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
@@ -6805,6 +8165,16 @@
# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
# ## - prometheus.io/port: If port is not 9102 use this annotation
# # monitor_kubernetes_pods = true
+# ## Get the list of pods to scrape with either the scope of
+# ## - cluster: the kubernetes watch api (default, no need to specify)
+# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+# # pod_scrape_scope = "cluster"
+# ## Only for node scrape scope: node IP of the node that telegraf is running on.
+# ## Either this config or the environment variable NODE_IP must be set.
+# # node_ip = "10.180.1.1"
+# ## Only for node scrape scope: interval in seconds for how often to get an updated pod list for scraping.
+# ## Default is 60 seconds.
+# # pod_scrape_interval = 60
# ## Restricts Kubernetes monitoring to a single namespace
# ## ex: monitor_kubernetes_pods_namespace = "default"
# # monitor_kubernetes_pods_namespace = ""
@@ -6814,6 +8184,19 @@
# # eg. To scrape pods on a specific node
# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
#
+# ## Scrape Services available in Consul Catalog
+# # [inputs.prometheus.consul]
+# # enabled = true
+# # agent = "http://localhost:8500"
+# # query_interval = "5m"
+#
+# # [[inputs.prometheus.consul.query]]
+# # name = "a service name"
+# # tag = "a service tag"
+# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+# # [inputs.prometheus.consul.query.tags]
+# # host = "{{.Node}}"
+#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
@@ -6835,6 +8218,42 @@
# # insecure_skip_verify = false
+
+
+# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
+# [[inputs.ras]]
+# ## Optional path to RASDaemon sqlite3 database.
+# ## Default: /var/lib/rasdaemon/ras-mc_event.db
+# # db_path = ""
+
+
+# # Riemann protobuf listener.
+# [[inputs.riemann_listener]]
+# ## URL to listen on.
+# ## Default is "tcp://:5555"
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+#
+# ## Maximum number of concurrent connections.
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+# ## Read timeout.
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+# ## Optional TLS configuration.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# # read_buffer_size = "64KiB"
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"


# # SFlow V5 Protocol Listener
# [[inputs.sflow]]
# ## Address to listen for sFlow packets.
@@ -6858,6 +8277,10 @@
# ## 1024. See README.md for details
# ##
# # service_address = "udp://:162"
+# ##
+# ## Path to mib files
+# # path = ["/usr/share/snmp/mibs"]
+# ##
# ## Timeout running snmptranslate command
# # timeout = "5s"
# ## Snmp version, defaults to 2c
@@ -6939,6 +8362,152 @@
# # content_encoding = "identity"
+
+
+# # Read metrics from SQL queries
+# [[inputs.sql]]
+# ## Database Driver
+# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
+# ## a list of supported drivers.
+# driver = "mysql"
+#
+# ## Data source name for connecting
+# ## The syntax and supported options depend on the selected driver.
+# dsn = "username:password@mysqlserver:3307/dbname?param=value"
+#
+# ## Timeout for any operation
+# ## Note that the timeout for queries is per query, not per gather.
+# # timeout = "5s"
+#
+# ## Connection time limits
+# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections
+# ## will not be closed automatically. If you specify a positive time, the connections will be closed after
+# ## idling or existing for at least that amount of time, respectively.
+# # connection_max_idle_time = "0s"
+# # connection_max_life_time = "0s"
+#
+# ## Connection count limits
+# ## By default the number of open connections is not limited and the number of maximum idle connections
+# ## will be inferred from the number of queries specified. If you specify a positive number for either of the
+# ## two options, connections will be closed when reaching the specified limit. The number of idle connections
+# ## will be clipped to the maximum number of connections limit, if any.
+# # connection_max_open = 0
+# # connection_max_idle = auto
+#
+# [[inputs.sql.query]]
+# ## Query to perform on the server
+# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
+# ## Alternatively to specifying the query directly, you can select a file here containing the SQL query.
+# ## Only one of 'query' and 'query_script' can be specified!
+# # query_script = "/path/to/sql/script.sql"
+#
+# ## Name of the measurement
+# ## In case both 'measurement' and 'measurement_column' are given, the latter takes precedence.
+# # measurement = "sql"
+#
+# ## Column name containing the name of the measurement
+# ## If given, this will take precedence over the 'measurement' setting. In case a query result
+# ## does not contain the specified column, we fall back to the 'measurement' setting.
+# # measurement_column = ""
+#
+# ## Column name containing the time of the measurement
+# ## If omitted, the time of the query will be used.
+# # time_column = ""
+#
+# ## Format of the time contained in 'time_column'
+# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
+# ## See https://golang.org/pkg/time/#Time.Format for details.
+# # time_format = "unix"
+#
+# ## Column names containing tags
+# ## An empty include list will reject all columns and an empty exclude list will not exclude any column.
+# ## I.e. by default no columns will be returned as tags.
+# # tag_columns_include = []
+# # tag_columns_exclude = []
+#
+# ## Column names containing fields (explicit types)
+# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
+# ## the automatic (driver-based) conversion below.
+# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
+# # field_columns_float = []
+# # field_columns_int = []
+# # field_columns_uint = []
+# # field_columns_bool = []
+# # field_columns_string = []
+#
+# ## Column names containing fields (automatic types)
+# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
+# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
+# ## NOTE: We rely on the database driver to perform automatic datatype conversion.
+# # field_columns_include = []
+# # field_columns_exclude = []
+
+
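Taken together, the options above compose as follows. A minimal sketch of an inputs.sql instance, assuming a hypothetical MySQL database reachable locally and a Scoreboard table like the one in the sample query; the DSN and column names are illustrative only:

    [[inputs.sql]]
      driver = "mysql"
      dsn = "telegraf:secret@tcp(localhost:3306)/app"

      [[inputs.sql.query]]
        query = "SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
        measurement = "scoreboard"
        ## 'user' and 'state' become tags; 'latency' and 'score' are typed
        ## explicitly, so driver-based conversion only applies to the rest.
        tag_columns_include = ["user", "state"]
        field_columns_float = ["latency"]
        field_columns_int = ["score"]
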
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## For Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, TLS connections can be created like so:
+# ## "encrypt=true;certificate=<cert>;hostNameInCertificate=<SqlHostname>"
+# servers = [
+# "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
+# ]
+#
+# ## Authentication method
+# ## valid methods: "connection_string", "AAD"
+# # auth_method = "connection_string"
+#
+# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+# ## In the config file, the sql server plugin section should be repeated, each with a set of servers for a specific database_type.
+# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer"
+#
+# ## Queries enabled by default for database_type = "AzureSQLDB" are -
+# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+#
+# # database_type = "AzureSQLDB"
+#
+# ## A list of queries to include. If not specified, all the above listed queries are used.
+# # include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# # exclude_query = []
+#
+# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+#
+# # database_type = "AzureSQLManagedInstance"
+#
+# # include_query = []
+#
+# # exclude_query = []
+#
+# ## Queries enabled by default for database_type = "SQLServer" are -
+# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu
+#
+# database_type = "SQLServer"
+#
+# include_query = []
+#
+# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here by default
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+#
+# ## The following are old config settings; you may use them only if you are using the earlier flavor of queries.
+# ## However, it is recommended to use the new mechanism of identifying the database_type and thereby use its corresponding queries.
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 is compatible with SQL Server 2012 and later, and also with Azure SQL DB
+# # query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false


# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
@@ -6983,6 +8552,10 @@
# ## Parses datadog extensions to the statsd format
# datadog_extensions = false
#
+# ## Parses distribution metrics as specified in the datadog statsd format
+# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
+# datadog_distributions = false
+#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# # templates = [
@@ -6997,11 +8570,14 @@
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000
+#
+# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
+# # max_ttl = "1000h"


-# # Suricata stats plugin
+# # Suricata stats and alerts plugin
# [[inputs.suricata]]
-# ## Data sink for Suricata stats log
+# ## Data sink for Suricata stats and alerts logs
# # This is expected to be a filename of a
# # unix socket to be created for listening.
# source = "/var/run/suricata-stats.sock"
@@ -7009,6 +8585,9 @@
# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
# # becomes "detect_alert" when delimiter is "_".
# delimiter = "_"
+#
+# ## Detect alert logs
+# # alerts = false
#
# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
@@ -7053,6 +8632,11 @@
# ## By default best effort parsing is off.
# # best_effort = false
#
+# ## The RFC standard to use for message parsing
+# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
+# ## Must be one of "RFC5424" or "RFC3164".
+# # syslog_standard = "RFC5424"
+#
# ## Character to prepend to SD-PARAMs (default = "_").
# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
@@ -7069,7 +8653,8 @@
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
-# ##
+# ## "/var/log/log[!1-2]*" -> tail files without 1-2
+# ## "/var/log/log[^1-2]*" -> identical behavior as above
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
@@ -7103,6 +8688,9 @@
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
+# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+# # path_tag = "path"
+#
# ## multiline parser/codec
# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
# #[inputs.tail.multiline]
@@ -7311,12 +8899,22 @@
# # custom_attribute_include = []
# # custom_attribute_exclude = ["*"]
#
+# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In
+# ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported
+# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
+# ## it too much may cause performance issues.
+# # metric_lookback = 3
+#
# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
+#
+# ## The Historical Interval value must match EXACTLY the interval in the daily
+# ## "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals
+# # historical_interval = "5m"
#
# # A Webhooks Event collector
diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index 5b70928994158..ee67219c3c3f5 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -90,12 +90,15 @@
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5

+ ## Pick a timezone to use when logging or type 'local' for local time.
+ ## Example: America/Chicago
+ # log_with_timezone = ""
+
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do no set the "host" tag in the telegraf agent.
omit_hostname = false
-
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
@@ -171,7 +174,7 @@
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
- # content_encoding = "identity"
+ # content_encoding = "gzip"

## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
@@ -179,6 +182,696 @@
## existing data has been written.
# influx_uint_support = false
+
+# # Configuration for Amon Server to send metrics to.
+# [[outputs.amon]]
+# ## Amon Server Key
+# server_key = "my-server-key" # required.
+#
+# ## Amon Instance URL
+# amon_instance = "https://youramoninstance" # required
+#
+# ## Connection timeout.
+# # timeout = "5s"
+
+
+# # Publishes metrics to an AMQP broker
+# [[outputs.amqp]]
+# ## Broker to publish to.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to publish to. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Maximum messages to send over a connection. Once this is reached, the
+# ## connection is closed and a new connection is made. This can be helpful for
+# ## load balancing when not using a dedicated load balancer.
+# # max_messages = 0
+#
+# ## Exchange to declare and publish to.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Metric tag to use as a routing key.
+# ## i.e., if this tag exists, its value will be used as the routing key
+# # routing_tag = "host"
+#
+# ## Static routing key. Used when no routing_tag is set or as a fallback
+# ## when the tag specified in routing tag is not found.
+# # routing_key = ""
+# # routing_key = "telegraf"
+#
+# ## Delivery Mode controls if a published message is persistent.
+# ## One of "transient" or "persistent".
+# # delivery_mode = "transient"
+#
+# ## InfluxDB database added as a message header.
+# ## deprecated in 1.7; use the headers option
+# # database = "telegraf"
+#
+# ## InfluxDB retention policy added as a message header
+# ## deprecated in 1.7; use the headers option
+# # retention_policy = "default"
+#
+# ## Static headers added to each published message.
+# # headers = { }
+# # headers = {"database" = "telegraf", "retention_policy" = "default"}
+#
+# ## Connection timeout. If not provided, will default to 5s. 0s means no
+# ## timeout (not recommended).
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## If true use batch serialization format instead of line based delimiting.
+# ## Only applies to data formats which are not line based such as JSON.
+# ## Recommended to set to true.
+# # use_batch_format = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" to compress the
+# ## payload, or "identity" to apply no encoding.
+# ##
+# ## Please note that when use_batch_format = false, each amqp message contains only
+# ## a single metric; it is recommended to use compression with batch format
+# ## for best results.
+# # content_encoding = "identity"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
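The two options above interact: compression pays off mostly when several metrics share one message. A sketch under that reading, reusing the sample broker URL and exchange:

    [[outputs.amqp]]
      brokers = ["amqp://localhost:5672/influxdb"]
      exchange = "telegraf"
      ## One message per batch instead of per metric, so gzip has something
      ## substantial to compress (per the note above).
      use_batch_format = true
      content_encoding = "gzip"
      data_format = "json"
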
+# # Send metrics to Azure Application Insights
+# [[outputs.application_insights]]
+# ## Instrumentation key of the Application Insights resource.
+# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
+#
+# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
+# # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
+#
+# ## Timeout for closing (default: 5s).
+# # timeout = "5s"
+#
+# ## Enable additional diagnostic logging.
+# # enable_diagnostic_logging = false
+#
+# ## Context Tag Sources add Application Insights context tags to a tag value.
+# ##
+# ## For list of allowed context tag keys see:
+# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
+# # [outputs.application_insights.context_tag_sources]
+# # "ai.cloud.role" = "kubernetes_container_name"
+# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
+
+
+# # Sends metrics to Azure Data Explorer
+# [[outputs.azure_data_explorer]]
+# ## Azure Data Explorer cluster endpoint
+# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net"
+# endpoint_url = ""
+#
+# ## The Azure Data Explorer database that the metrics will be ingested into.
+# ## The plugin will NOT generate this database automatically; it is expected that this database already exists before ingestion.
+# ## ex: "exampledatabase"
+# database = ""
+#
+# ## Timeout for Azure Data Explorer operations
+# # timeout = "20s"
+#
+# ## Type of metrics grouping used when pushing to Azure Data Explorer.
+# ## Default is "TablePerMetric" for one table per different metric.
+# ## For more information, please check the plugin README.
+# # metrics_grouping_type = "TablePerMetric"
+#
+# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
+# # table_name = ""
+#
+
+
+# # Send aggregate metrics to Azure Monitor
+# [[outputs.azure_monitor]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Set the namespace prefix, defaults to "Telegraf/".
+# # namespace_prefix = "Telegraf/"
+#
+# ## Azure Monitor doesn't have a string value type, so convert string
+# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+# ## a maximum of 10 dimensions so Telegraf will only send the first 10
+# ## alphanumeric dimensions.
+# # strings_as_dimensions = false
+#
+# ## Both region and resource_id must be set or be available via the
+# ## Instance Metadata service on Azure Virtual Machines.
+# #
+# ## Azure Region to publish metrics against.
+# ## ex: region = "southcentralus"
+# # region = ""
+# #
+# ## The Azure Resource ID against which metrics will be logged, e.g.
+# ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
+# # resource_id = ""
+#
+# ## Optionally, if in Azure US Government, China or other sovereign
+# ## cloud environment, set appropriate REST endpoint for receiving
+# ## metrics. (Note: region may be unused in this context)
+# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
+
+
+# # Publish Telegraf metrics to a Google Cloud PubSub topic
+# [[outputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub topic.
+# project = "my-project"
+#
+# ## Required. Name of PubSub topic to publish metrics to.
+# topic = "my-topic"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. If true, will send all metrics per write in one PubSub message.
+# # send_batched = true
+#
+# ## The following publish_* parameters specifically configure batching
+# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
+# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when it has this many PubSub messages. If send_batched is true,
+# ## this is ignored and treated as if it were 1.
+# # publish_count_threshold = 1000
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when its batched payload reaches this many bytes. If send_batched is true,
+# ## this is ignored and treated as if it were 1.
+# # publish_byte_threshold = 1000000
+#
+# ## Optional. Specifically configures requests made to the PubSub API.
+# # publish_num_go_routines = 2
+#
+# ## Optional. Specifies a timeout for requests to the PubSub API.
+# # publish_timeout = "30s"
+#
+# ## Optional. If true, published PubSub message data will be base64-encoded.
+# # base64_data = false
+#
+# ## Optional. PubSub attributes to add to metrics.
+# # [outputs.cloud_pubsub.attributes]
+# # my_attr = "tag_value"
+
+
+# # Configuration for AWS CloudWatch output.
+# [[outputs.cloudwatch]]
+# ## Amazon REGION
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Namespace for the CloudWatch MetricDatums
+# namespace = "InfluxData/Telegraf"
+#
+# ## If you have a large number of metrics, you should consider sending statistic
+# ## values instead of raw metrics, which can not only improve performance but
+# ## also save on AWS API cost. If this flag is enabled, the plugin will parse the required
+# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch.
+# ## You can use the basicstats aggregator to calculate those fields. If not all statistic
+# ## fields are available, all fields will still be sent as raw metrics.
+# # write_statistics = false
+#
+# ## Enable high resolution metrics of 1 second (if not enabled, the standard 60 second resolution is used)
+# # high_resolution_metrics = false
+
+
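The write_statistics note above references the basicstats aggregator; a sketch of that pairing, assuming a 60s aggregation period (the aggregator's option names come from its own plugin section, not from this output):

    [[aggregators.basicstats]]
      period = "60s"
      drop_original = true
      ## Produce exactly the fields the CloudWatch output folds into a
      ## statistic set: count, min, max, and sum.
      stats = ["count", "min", "max", "sum"]

    [[outputs.cloudwatch]]
      region = "us-east-1"
      namespace = "InfluxData/Telegraf"
      write_statistics = true
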
+# # Configuration for AWS CloudWatchLogs output.
+# [[outputs.cloudwatch_logs]]
+# ## The region is the Amazon region that you wish to connect to.
+# ## Examples include but are not limited to:
+# ## - us-west-1
+# ## - us-west-2
+# ## - us-east-1
+# ## - ap-southeast-1
+# ## - ap-southeast-2
+# ## ...
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront!
+# ## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place
+# log_group = "my-group-name"
+#
+# ## Log stream in log group
+# ## Either log group name or reference to metric attribute, from which it can be parsed:
+# ## tag:<name_of_tag> or field:<name_of_field>. If the log stream does not exist, it will be created.
+# ## Since AWS does not automatically delete log streams with expired log entries (i.e. empty log streams),
+# ## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
+# log_stream = "tag:location"
+#
+# ## Source of log data - metric name
+# ## Specify the name of the metric from which the log data should be retrieved.
+# ## I.e., if you are using the docker_log plugin to stream logs from a container, then
+# ## specify log_data_metric_name = "docker_log"
+# log_data_metric_name = "docker_log"
+#
+# ## Specify from which metric attribute the log data should be retrieved:
+# ## tag:<name_of_tag> or field:<name_of_field>.
+# ## I.e., if you are using the docker_log plugin to stream logs from a container, then
+# ## specify log_data_source = "field:message"
+# log_data_source = "field:message"
+
+
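Read end to end, the section above describes a docker_log-to-CloudWatch pipeline; a sketch, assuming docker_log's container_name tag is a reasonable stream key (the log group must already exist, per the note above):

    [[inputs.docker_log]]
      endpoint = "unix:///var/run/docker.sock"

    [[outputs.cloudwatch_logs]]
      region = "us-east-1"
      log_group = "my-group-name"
      ## One stream per container (assumption); falls back to creation on
      ## first write, but never deletes empty streams.
      log_stream = "tag:container_name"
      log_data_metric_name = "docker_log"
      log_data_source = "field:message"
      ## Keep non-log metrics out of this output.
      namepass = ["docker_log"]
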
+# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option. +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string +# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's +# force_document_id = false + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. 
+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Run executable as long-running output plugin
+# [[outputs.execd]]
+# ## Program to run as daemon
+# command = ["my-telegraf-output", "--some-flag", "value"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to export.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf metrics to file(s)
+# [[outputs.file]]
+# ## Files to write to, "stdout" is a specially handled file.
+# files = ["stdout", "/tmp/metrics.out"]
+#
+# ## Use batch serialization format instead of line based delimiting. The
+# ## batch format allows for the production of non line based output formats and
+# ## may more efficiently encode metric groups.
+# # use_batch_format = false
+#
+# ## The file will be rotated after the time interval specified. When set
+# ## to 0 no time based rotation is performed.
+# # rotation_interval = "0d"
+#
+# ## The logfile will be rotated when it becomes larger than the specified
+# ## size. When set to 0 no size based rotation is performed.
+# # rotation_max_size = "0MB"
+#
+# ## Maximum number of rotated archives to keep, any older logs are deleted.
+# ## If set to -1, no archives are removed.
+# # rotation_max_archives = 5
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Configuration for Graphite server to send metrics to
+# [[outputs.graphite]]
+# ## TCP endpoint for your graphite instance.
+# ## If multiple endpoints are configured, output will be load balanced.
+# ## Only one of the endpoints will be written to with each iteration.
+# servers = ["localhost:2003"]
+# ## Prefix metrics name
+# prefix = ""
+# ## Graphite output template
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# template = "host.tags.measurement.field"
+#
+# ## Enable Graphite tags support
+# # graphite_tag_support = false
+#
+# ## Define how metric names and tags are sanitized; options are "strict" or "compatible"
+# ## strict - Default method, and backwards compatible with previous versions of Telegraf
+# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec
+# # graphite_tag_sanitize_mode = "strict"
+#
+# ## Character for separating metric name and field for Graphite tags
+# # graphite_separator = "."
+#
+# ## Graphite templates patterns
+# ## 1. Template for cpu
+# ## 2. Template for disk*
+# ## 3. Default template
+# # templates = [
+# # "cpu tags.measurement.host.field",
+# # "disk* measurement.field",
+# # "host.measurement.tags.field"
+# #]
+#
+# ## Timeout in seconds for the write connection to graphite
+# timeout = 2
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Send telegraf metrics to graylog
+# [[outputs.graylog]]
+# ## Endpoints for your graylog instances.
+# servers = ["udp://127.0.0.1:12201"]
+#
+# ## Connection timeout.
+# # timeout = "5s"
+#
+# ## The field to use as the GELF short_message, if unset the static string
+# ## "telegraf" will be used.
+# ## example: short_message_field = "message"
+# # short_message_field = ""
+
+
+# # Configurable HTTP health check resource based on metrics
+# [[outputs.health]]
+# ## Address and port to listen on.
+# ## ex: service_address = "http://localhost:8080"
+# ## service_address = "unix:///var/run/telegraf-health.sock"
+# # service_address = "http://:8080"
+#
+# ## The maximum duration for reading the entire request.
+# # read_timeout = "5s"
+# ## The maximum duration for writing the entire response.
+# # write_timeout = "5s"
+#
+# ## Username and password to accept for HTTP basic authentication.
+# # basic_username = "user1"
+# # basic_password = "secret"
+#
+# ## Allowed CA certificates for client certificates.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## TLS server certificate and private key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## One or more check sub-tables should be defined; it is also recommended to
+# ## use metric filtering to limit the metrics that flow into this output.
+# ##
+# ## When using the default buffer sizes, this example will fail when the
+# ## metric buffer is half full.
+# ##
+# ## namepass = ["internal_write"]
+# ## tagpass = { output = ["influxdb"] }
+# ##
+# ## [[outputs.health.compares]]
+# ## field = "buffer_size"
+# ## lt = 5000.0
+# ##
+# ## [[outputs.health.contains]]
+# ## field = "buffer_size"
+
+
+# # A plugin that can transmit metrics over HTTP
+# [[outputs.http]]
+# ## URL is the address to send metrics to
+# url = "http://127.0.0.1:8080/telegraf"
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP method, one of: "POST" or "PUT"
+# # method = "POST"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
+# ## Data format to output.
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 + + # # Configuration for sending metrics to InfluxDB # [[outputs.influxdb_v2]] # ## The URLs of the InfluxDB cluster nodes. @@ -186,7 +879,7 @@ # ## Multiple URLs can be specified for a single cluster, only ONE of the # ## urls will be written to each interval. # ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] -# urls = ["http://127.0.0.1:9999"] +# urls = ["http://127.0.0.1:8086"] # # ## Token for authentication. # token = "" @@ -232,188 +925,7970 @@ # # insecure_skip_verify = false -############################################################################### -# INPUT PLUGINS # -############################################################################### +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communication to Instrumental +# debug = false -# Windows Performance Counters plugin. -# These are the recommended method of monitoring system metrics on windows, -# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, -# which utilize more system resources. -# -# See more configuration examples at: -# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters - -[[inputs.win_perf_counters]] - [[inputs.win_perf_counters.object]] - # Processor usage, alternative to native, reports on a per core. - ObjectName = "Processor" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Interrupt Time", - "% Privileged Time", - "% User Time", - "% Processor Time", - "% DPC Time", - ] - Measurement = "win_cpu" - # Set to true to include _Total instance when querying for all (*). - IncludeTotal=true - - [[inputs.win_perf_counters.object]] - # Disk times and queues - ObjectName = "LogicalDisk" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - "% Free Space", - "Current Disk Queue Length", - "Free Megabytes", - ] - Measurement = "win_disk" - # Set to true to include _Total instance when querying for all (*). 
- #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - ObjectName = "PhysicalDisk" - Instances = ["*"] - Counters = [ - "Disk Read Bytes/sec", - "Disk Write Bytes/sec", - "Current Disk Queue Length", - "Disk Reads/sec", - "Disk Writes/sec", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - ] - Measurement = "win_diskio" - - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Received/sec", - "Bytes Sent/sec", - "Packets Received/sec", - "Packets Sent/sec", - "Packets Received Discarded", - "Packets Outbound Discarded", - "Packets Received Errors", - "Packets Outbound Errors", - ] - Measurement = "win_net" - - [[inputs.win_perf_counters.object]] - ObjectName = "System" - Counters = [ - "Context Switches/sec", - "System Calls/sec", - "Processor Queue Length", - "System Up Time", - ] - Instances = ["------"] - Measurement = "win_system" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Memory object. - ObjectName = "Memory" - Counters = [ - "Available Bytes", - "Cache Faults/sec", - "Demand Zero Faults/sec", - "Page Faults/sec", - "Pages/sec", - "Transition Faults/sec", - "Pool Nonpaged Bytes", - "Pool Paged Bytes", - "Standby Cache Reserve Bytes", - "Standby Cache Normal Priority Bytes", - "Standby Cache Core Bytes", - ] - # Use 6 x - to remove the Instance bit from the query. - Instances = ["------"] - Measurement = "win_mem" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Paging File object. - ObjectName = "Paging File" - Counters = [ - "% Usage", - ] - Instances = ["_Total"] - Measurement = "win_swap" - - -# Windows system plugins using WMI (disabled by default, using -# win_perf_counters over WMI is recommended) - - -# # Read metrics about cpu usage -# [[inputs.cpu]] -# ## Whether to report per-cpu stats or not -# percpu = true -# ## Whether to report total system cpu stats or not -# totalcpu = true -# ## If true, collect raw CPU time metrics. -# collect_cpu_time = false -# ## If true, compute and report the sum of all non-idle CPU states. -# report_active = false - - -# # Read metrics about disk usage by mount point -# [[inputs.disk]] -# ## By default stats will be gathered for all mount points. -# ## Set mount_points will restrict the stats to only the specified mount points. -# # mount_points = ["/"] -# -# ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] - - -# # Read metrics about disk IO by device -# [[inputs.diskio]] -# ## By default, telegraf will gather stats for all devices including -# ## disk partitions. -# ## Setting devices will restrict the stats to the specified devices. -# # devices = ["sda", "sdb", "vd*"] -# ## Uncomment the following line if you need disk serial numbers. -# # skip_serial_number = false -# # -# ## On systems which support it, device metadata can be added in the form of -# ## tags. -# ## Currently only Linux is supported via udev properties. 
You can view
-# ## available properties for a device by running:
-# ## 'udevadm info -q property -n /dev/sda'
-# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
-# #
-# ## Using the same metadata source as device_tags, you can also customize the
-# ## name of the device via templates.
-# ## The 'name_templates' parameter is a list of templates to try and apply to
-# ## the device. The template may contain variables in the form of '$PROPERTY' or
-# ## '${PROPERTY}'. The first template which does not contain any variables not
-# ## present for the device is used as the device name tag.
-# ## The typical use case is for LVM volumes, to get the VG/LV name instead of
-# ## the near-meaningless DM-0 name.
-# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]


-# # Read metrics about memory usage
-# [[inputs.mem]]
-# # no configuration

+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+# ## URLs of kafka brokers
+# brokers = ["localhost:9092"]
+# ## Kafka topic for producer messages
+# topic = "telegraf"
+#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from the metric.
+# # exclude_topic_tag = false
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional topic suffix configuration.
+# ## If the section is omitted, no suffix is used.
+# ## The following topic suffix methods are supported:
+# ## measurement - suffix equals separator + measurement's name
+# ## tags - suffix equals separator + specified tags' values
+# ## interleaved with separator
+#
+# ## Suffix equals "_" + measurement name
+# # [outputs.kafka.topic_suffix]
+# # method = "measurement"
+# # separator = "_"
+#
+# ## Suffix equals "__" + measurement's "foo" tag value.
+# ## If there is no such tag, the suffix is an empty string
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo"]
+# # separator = "__"
+#
+# ## Suffix equals "_" + measurement's "foo" and "bar"
+# ## tag values, separated by "_". If there are no such tags,
+# ## their values are treated as empty strings.
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo", "bar"]
+# # separator = "_"
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
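One worked reading of the topic_suffix rules above, assuming metrics tagged region=eu and env=prod; the tag values are joined to the base topic with the separator:

    [[outputs.kafka]]
      brokers = ["localhost:9092"]
      topic = "telegraf"
      routing_tag = "host"

      ## Messages land on topic "telegraf__eu__prod"; a metric missing either
      ## tag contributes an empty string in that position.
      [outputs.kafka.topic_suffix]
        method = "tags"
        keys = ["region", "env"]
        separator = "__"
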
+#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+#
+# ## Idempotent Writes
+# ## If enabled, exactly one copy of each message is written.
+# # idempotent_writes = false
+#
+# ## RequiredAcks is used in Produce Requests to tell the broker how many
+# ## replica acknowledgements it must see before responding
+# ## 0 : the producer never waits for an acknowledgement from the broker.
+# ## This option provides the lowest latency but the weakest durability
+# ## guarantees (some data will be lost when a server fails).
+# ## 1 : the producer gets an acknowledgement after the leader replica has
+# ## received the data. This option provides better durability as the
+# ## client waits until the server acknowledges the request as successful
+# ## (only messages that were written to the now-dead leader but not yet
+# ## replicated will be lost).
+# ## -1: the producer gets an acknowledgement after all in-sync replicas have
+# ## received the data. This option provides the best durability; we
+# ## guarantee that no messages will be lost as long as at least one
+# ## in-sync replica remains.
+# # required_acks = -1
+#
+# ## The maximum number of times to retry sending a metric before failing
+# ## until the next flush.
+# # max_retry = 3
+#
+# ## The maximum permitted size of a message. Should be set equal to or
+# ## smaller than the broker's 'message.max.bytes'.
+# # max_message_bytes = 1000000
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional SASL Config
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## Optional SASL:
+# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
+# ## (defaults to PLAIN)
+# # sasl_mechanism = ""
+#
+# ## used if sasl_mechanism is GSSAPI (experimental)
+# # sasl_gssapi_service_name = ""
+# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
+# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
+# # sasl_gssapi_kerberos_config_path = "/"
+# # sasl_gssapi_realm = "realm"
+# # sasl_gssapi_key_tab_path = ""
+# # sasl_gssapi_disable_pafxfast = false
+#
+# ## used if sasl_mechanism is OAUTHBEARER (experimental)
+# # sasl_access_token = ""
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"


-# # Read metrics about swap memory usage
-# [[inputs.swap]]
-# # no configuration


+# # Configuration for the AWS Kinesis output.
+# [[outputs.kinesis]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## DEPRECATED: PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## DEPRECATED: If set the partitionKey will be a random UUID on every put. +# ## This allows for scaling across multiple shards in a stream. +# ## This will cause issues with ordering. +# use_random_partitionkey = false +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" +# +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. 
+# # url = "https://listener.logz.io:8071" + + +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. +# # batch = false +# +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## Optional client name +# # name = "" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## New Relic Insights API key
+# insights_key = "insights api key"
+#
+# ## Prefix to add to metric name for easy identification.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+#
+# ## HTTP Proxy override. If unset use values from the standard
+# ## proxy environment variables to determine proxy, if any.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## Metric URL override to enable geographic location endpoints.
+# # If not set, the standard endpoint below is used:
+# # metric_url = "https://metric-api.newrelic.com/metric/v1"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+# ## Location of nsqd instance listening on TCP
+# server = "localhost:4150"
+# ## NSQ topic for producer messages
+# topic = "telegraf"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send OpenTelemetry metrics over gRPC
+# [[outputs.opentelemetry]]
+# ## Override the default (localhost:4317) OpenTelemetry gRPC service
+# ## address:port
+# # service_address = "localhost:4317"
+#
+# ## Override the default (5s) request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config.
+# ##
+# ## Root certificates for verifying server certificates encoded in PEM format.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# ## The public and private keypairs for the client encoded in PEM format.
+# ## May contain intermediate certificates.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS, but skip TLS chain and host verification.
+# # insecure_skip_verify = false
+# ## Send the specified TLS server name via SNI.
+# # tls_server_name = "foo.example.com"
+#
+# ## Override the default (gzip) compression used to send data.
+# ## Supports: "gzip", "none"
+# # compression = "gzip"
+#
+# ## Additional OpenTelemetry resource attributes
+# # [outputs.opentelemetry.attributes]
+# # "service.name" = "demo"
+#
+# ## Additional gRPC request metadata
+# # [outputs.opentelemetry.headers]
+# # key1 = "value1"
+
+
+# # Configuration for OpenTSDB server to send metrics to
+# [[outputs.opentsdb]]
+# ## prefix for metrics keys
+# prefix = "my.specific.prefix."
+#
+# ## DNS name of the OpenTSDB server
+# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+# ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
+# host = "opentsdb.example.com"
+#
+# ## Port of the OpenTSDB server
+# port = 4242
+#
+# ## Number of data points to send to OpenTSDB in HTTP requests.
+# ## Not used with telnet API.
+# http_batch_size = 50
+#
+# ## URI Path for HTTP requests to OpenTSDB.
+# ## Used in cases where OpenTSDB is located behind a reverse proxy.
+# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# listen = ":9273" +# +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. +# # timeout = "5s" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann_legacy]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). 
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding backend API path
+# ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name).
+# ##
+# ## Backend Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## AGENT API URL is the Sensu Agent API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding agent API path (/events).
+# ##
+# ## Agent API Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output
+# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are
+# ## not provided, the output plugin will default to use an agent_api_url of
+# ## http://127.0.0.1:3031
+# ##
+# # backend_api_url = "http://127.0.0.1:8080"
+# # agent_api_url = "http://127.0.0.1:3031"
+#
+# ## API KEY is the Sensu Backend API token
+# ## Generate a new API token via:
+# ##
+# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities
+# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf
+# ## $ sensuctl user create telegraf --group telegraf --password REDACTED
+# ## $ sensuctl api-key grant telegraf
+# ##
+# ## For more information on Sensu RBAC profiles & API tokens, please visit:
+# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/
+# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/
+# ##
+# # api_key = "${SENSU_API_KEY}"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Sensu Event details
+# ##
+# ## Below are the event details to be sent to Sensu. The main portions of the
+# ## event are the check, entity, and metrics specifications. For more information
+# ## on Sensu events and their components, please visit:
+# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
+# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
+# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
+# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
+# ##
+# ## Check specification
+# ## The check name is the name to give the Sensu check associated with the event
+# ## created. This maps to check.metadata.name in the event.
+# [outputs.sensu.check]
+# name = "telegraf"
+#
+# ## Entity specification
+# ## Configure the entity name and namespace, if necessary. This will be part of
+# ## the entity.metadata in the event.
+# ##
+# ## NOTE: if the output plugin is configured to send events to a
+# ## backend_api_url and entity_name is not set, the value returned by
+# ## os.Hostname() will be used; if the output plugin is configured to send
+# ## events to an agent_api_url, entity_name and entity_namespace are not used.
+# # [outputs.sensu.entity]
+# # name = "server-01"
+# # namespace = "default"
+#
+# ## Metrics specification
+# ## Configure the tags for the metrics that are sent as part of the Sensu event
+# # [outputs.sensu.tags]
+# # source = "telegraf"
+#
+# ## Configure the handler(s) for processing the provided metrics
+# # [outputs.sensu.metrics]
+# # handlers = ["influxdb","elasticsearch"]
+
+
+# # Send metrics and events to SignalFx
+# [[outputs.signalfx]]
+# ## SignalFx Org Access Token
+# access_token = "my-secret-token"
+#
+# ## The SignalFx realm that your organization resides in
+# signalfx_realm = "us9" # Required if ingest_url is not set
+#
+# ## You can optionally provide a custom ingest url instead of the
+# ## signalfx_realm option above if you are using a gateway or proxy
+# ## instance. This option takes precedence over signalfx_realm.
+# ingest_url = "https://my-custom-ingest/"
+#
+# ## Event typed metrics are omitted by default,
+# ## If you require an event typed metric you must specify the
+# ## metric name in the following list.
+# included_event_names = ["plugin.metric_name"]
+
+
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+# ## URL to connect to
+# # address = "tcp://127.0.0.1:8094"
+# # address = "tcp://example.com:http"
+# # address = "tcp4://127.0.0.1:8094"
+# # address = "tcp6://127.0.0.1:8094"
+# # address = "tcp6://[2001:db8::1]:8094"
+# # address = "udp://127.0.0.1:8094"
+# # address = "udp4://127.0.0.1:8094"
+# # address = "udp6://127.0.0.1:8094"
+# # address = "unix:///tmp/telegraf.sock"
+# # address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Content encoding for packet-based connections (e.g. UDP, unixgram).
+# ## Can be set to "gzip" or to "identity" to apply no encoding.
+# ##
+# # content_encoding = "identity"
+#
+# ## Data format to generate.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Send metrics to SQL Database
+# [[outputs.sql]]
+# ## Database driver
+# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres),
+# ## sqlite (SQLite3), snowflake (snowflake.com)
+# # driver = ""
+#
+# ## Data source name
+# ## The format of the data source name is different for each database driver.
+# ## See the plugin readme for details.
+# # data_source_name = ""
+#
+# ## Timestamp column name
+# # timestamp_column = "timestamp"
+#
+# ## Table creation template
+# ## Available template variables:
+# ## {TABLE} - table name as a quoted identifier
+# ## {TABLELITERAL} - table name as a quoted string literal
+# ## {COLUMNS} - column definitions (list of quoted identifiers and types)
+# # table_template = "CREATE TABLE {TABLE}({COLUMNS})"
+#
+# ## Table existence check template
+# ## Available template variables:
+# ## {TABLE} - table name as a quoted identifier
+# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1"
+#
+# ## Initialization SQL
+# # init_sql = ""
+#
+# ## Metric type to SQL type conversion
+# #[outputs.sql.convert]
+# # integer = "INT"
+# # real = "DOUBLE"
+# # text = "TEXT"
+# # timestamp = "TIMESTAMP"
+# # defaultvalue = "TEXT"
+# # unsigned = "UNSIGNED"
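+#
+# ## Illustration (not part of the config): with the default table_template and
+# ## the conversion table above, a metric named "cpu" with tag "host" and float
+# ## field "usage" would be created roughly as
+# ## CREATE TABLE "cpu"("timestamp" TIMESTAMP,"host" TEXT,"usage" DOUBLE)
+# ## with one row inserted per metric.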
+# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. +# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. +# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. 
+# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field a +# ## SD-PARAMS is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Configuration for Amazon Timestream output. 
+
+
+# # Configuration for Amazon Timestream output.
+# [[outputs.timestream]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order:
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make requests against; the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Timestream database where the metrics will be inserted.
+# ## The database must exist prior to starting Telegraf.
+# database_name = "yourDatabaseNameHere"
+#
+# ## Specifies if the plugin should describe the Timestream database upon starting,
+# ## to validate that it has access (necessary permissions, connectivity, etc.), as a safety check.
+# ## If the describe operation fails, the plugin will not start
+# ## and therefore the Telegraf agent will not start.
+# describe_database_on_start = false
+#
+# ## The mapping mode specifies how Telegraf records are represented in Timestream.
+# ## Valid values are: single-table, multi-table.
+# ## For example, consider the following data in line protocol format:
+# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+# ## where weather and airquality are the measurement names, location and season are tags,
+# ## and temperature, humidity, no2, pm25 are fields.
+# ## In multi-table mode:
+# ## - the first line will be ingested to a table named weather
+# ## - the second line will be ingested to a table named airquality
+# ## - the tags will be represented as dimensions
+# ## - the first table (weather) will have two records:
+# ## one with measure name equal to temperature,
+# ## another with measure name equal to humidity
+# ## - the second table (airquality) will have two records:
+# ## one with measure name equal to no2,
+# ## another with measure name equal to pm25
+# ## - the Timestream tables from the example will look like this:
+# ## TABLE "weather":
+# ## time | location | season | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+# ## TABLE "airquality":
+# ## time | location | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-west | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | pm25 | 16
+# ## In single-table mode:
+# ## - the data will be ingested to a single table, whose name will be valueOf(single_table_name)
+# ## - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+# ## - location and season will be represented as dimensions
+# ## - temperature, humidity, no2, pm25 will be represented as measure names
+# ## - the Timestream table from the example will look like this:
+# ## Assuming:
+# ## - single_table_name = "my_readings"
+# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+# ## TABLE "my_readings":
+# ## time | location | season | namespace | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+# ## In most cases, using the multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies if the plugin should create the table, if the table does not exist.
+# ## The plugin writes the data without first checking if the table exists.
+# ## When the table does not exist, the error returned from Timestream will cause
+# ## the plugin to create the table, if this parameter is set to true.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details.
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on Warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for Wavefront server to send metrics to
+# [[outputs.wavefront]]
+# ## URL for Wavefront Direct Ingestion, or HTTP when using a Wavefront Proxy.
+# ## If using a Wavefront Proxy, also specify the port. example: http://proxyserver:2878
+# url = "https://metrics.wavefront.com"
+#
+# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
+# #token = "DUMMY_TOKEN"
+#
+# ## DNS name of the wavefront proxy server. Do not use if url is specified
+# #host = "wavefront.example.com"
+#
+# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
+# #port = 2878
+#
+# ## prefix for metrics keys
+# #prefix = "my.specific.prefix."
+#
+# ## whether to use "value" for name of simple fields. default is false
+# #simple_fields = false
+#
+# ## character to use between metric and field name. default is . (dot)
+# #metric_separator = "."
+#
+# ## Convert metric name paths to use the metricSeparator character.
+# ## When true, all _ (underscore) characters in the final metric name are converted. default is true
+# #convert_paths = true
+#
+# ## Use strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
+# ## Use Regex to sanitize metric and tag names from invalid characters
+# ## Regex is more thorough, but significantly slower. default is false
+# #use_regex = false
+#
+# ## point tags to use as the source name for Wavefront (if none found, host will be used)
+# #source_override = ["hostname", "address", "agent_host", "node_host"]
+#
+# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
+# #convert_bool = true
+#
+# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+# #truncate_tags = false
+#
+# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics
+# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending
+# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
+# ## Telegraf.
+# #immediate_flush = true
+#
+# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
+# ## deprecated in 1.9; use the enum processor plugin
+# #[[outputs.wavefront.string_to_number.elasticsearch]]
+# # green = 1.0
+# # yellow = 0.5
+# # red = 0.0
+
+
+# # Generic WebSocket output writer.
+# [[outputs.websocket]]
+# ## URL is the address to send metrics to. Make sure ws or wss scheme is used.
+# url = "ws://127.0.0.1:8080/telegraf"
+#
+# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
+# # connect_timeout = "30s"
+# # write_timeout = "30s"
+# # read_timeout = "30s"
+#
+# ## Optionally turn on using text data frames (binary by default).
+# # use_text_frames = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Additional HTTP Upgrade headers
+# # [outputs.websocket.headers]
+# # Authorization = "Bearer <TOKEN>"
+
+
+# # Send aggregated metrics to Yandex.Cloud Monitoring
+# [[outputs.yandex_cloud_monitoring]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+#
+# ## All user metrics should be sent with the "custom" service specified. Normally should not be changed
+# # service = "custom"
+
+
+###############################################################################
+#                            PROCESSOR PLUGINS                                #
+###############################################################################
+
+
+# # Attach AWS EC2 metadata to metrics
+# [[processors.aws_ec2]]
+# ## Instance identity document tags to attach to metrics.
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+# ##
+# ## Available tags:
+# ## * accountId
+# ## * architecture
+# ## * availabilityZone
+# ## * billingProducts
+# ## * imageId
+# ## * instanceId
+# ## * instanceType
+# ## * kernelId
+# ## * pendingTime
+# ## * privateIp
+# ## * ramdiskId
+# ## * region
+# ## * version
+# imds_tags = []
+#
+# ## EC2 instance tags retrieved with DescribeTags action.
+# ## If a tag is empty upon retrieval, it is omitted when tagging metrics.
+# ## Note that in order for this to work, the role attached to the EC2 instance
+# ## or the AWS credentials available from the environment must have a policy
+# ## attached that allows ec2:DescribeTags.
+# ##
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+# ec2_tags = []
+#
+# ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
+# timeout = "10s"
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+# ## at the same time.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_calls = 10
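+#
+# ## Illustration (not part of the config): e.g. imds_tags = ["instanceId", "instanceType"]
+# ## would add tags such as instanceId=i-0123456789abcdef0 and instanceType=t2.micro
+# ## (hypothetical values) to every metric passing through this processor.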
+
+
+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<tag-key>...]
+# [processors.converter.tags]
+# measurement = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<field-key>...]
+# [processors.converter.fields]
+# measurement = []
+# tag = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+
+
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+#
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
+#
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
+#
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+# ## Maximum time to suppress output
+# dedup_interval = "600s"
+
+
+# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
+# [[processors.defaults]]
+# ## Ensures a set of fields always exists on your metric(s) with their
+# ## respective default value.
+# ## For any given field pair (key = default), if it's not set, a field
+# ## is set on the metric with the specified default.
+# ##
+# ## A field is considered not set if it is nil on the incoming metric;
+# ## or it is not nil but its value is an empty string or is a string
+# ## of one or more spaces.
+# ## <target-field> = <value>
+# # [processors.defaults.fields]
+# # field_1 = "bar"
+# # time_idle = 0
+# # is_error = true
+
+
+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map. Globs accepted.
+# field = "status"
+#
+# ## Name of the tag to map. Globs accepted.
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
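+#
+# ## Illustration (not part of the config): with the mapping above, a metric
+# ## system,foo=bar status="green"
+# ## leaves this processor as
+# ## system,foo=bar status="green",status_code=1i
+# ## since dest = "status_code" writes the mapped value to a new field.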
+
+
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to its last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Only has an
+# ## effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where the interface name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order set this to true. Keeping the metrics ordered may
+# ## be slightly slower.
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses, if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
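+#
+# ## Illustration (not part of the config): a metric carrying ifIndex=2 and
+# ## agent=127.0.0.1 would gain a tag such as ifName=eth0 (hypothetical value),
+# ## looked up from that agent via SNMP.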
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.override.tags]
+# # additional_tag = "tag_value"
+
+
+# # Parse a value in a specified field/tag(s) and add the result in a new metric
+# [[processors.parser]]
+# ## The names of the fields whose values will be parsed.
+# parse_fields = []
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The data format to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
+# value_key = "value"
+
+
+# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+# ## Or name of the field holding the port number
+# # field = "port"
+#
+# ## Name of output tag or field (depending on the source) where service name will be added
+# # dest = "service"
+#
+# ## Default protocol, tcp or udp
+# # default_protocol = "tcp"
+#
+# ## Tag containing the protocol (tcp or udp, case-insensitive)
+# # protocol_tag = "proto"
+#
+# ## Field containing the protocol (tcp or udp, case-insensitive)
+# # protocol_field = "proto"
+
+
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+
+
+# # Transforms tag and field values with regex pattern
+# [[processors.regex]]
+# ## Tag and field conversions defined in separate sub-tables
+# # [[processors.regex.tags]]
+# # ## Tag to change
+# # key = "resp_code"
+# # ## Regular expression to match on a tag value
+# # pattern = "^(\\d)\\d\\d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}xx"
+#
+# # [[processors.regex.fields]]
+# # ## Field to change
+# # key = "request"
+# # ## All the power of the Go regular expressions available here
+# # ## For example, named subgroups
+# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# # replacement = "${method}"
+# # ## If result_key is present, a new field will be created
+# # ## instead of changing existing field
+# # result_key = "method"
+#
+# ## Multiple conversions may be applied for one field sequentially
+# ## Let's extract one more value
+# # [[processors.regex.fields]]
+# # key = "request"
+# # pattern = ".*category=(\\w+).*"
+# # replacement = "${1}"
+# # result_key = "search_category"
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long you should wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the IP from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the IP from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+#     return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
+# # [processors.starlark.constants]
+# # max_size = 10
+# # threshold = 0.75
+# # default_name = "Julia"
+# # debug_mode = true
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded utf-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+#
+# ## Sanitize a string to ensure it is a valid utf-8 string
+# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
+# # [[processors.strings.valid_utf8]]
+# # field = "message"
+# # replacement = ""
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
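+#
+# ## Illustration (not part of the config): with the template above, a metric
+# ## tagged hostname=web01 and level=error gains the tag topic=web01.error.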
+
+
+# # Keep only the top k series over a period of time (the rest are dropped).
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (whose name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_fields' setting allows you to specify for which
+# ## fields the position is required. If the list is non-empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_fields' setting allows you to
+# ## specify for which fields the final aggregation value is required. If the
+# ## list is non-empty, then a field will be added to each and every metric for
+# ## each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
+
+
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
+
+###############################################################################
+#                           AGGREGATOR PLUGINS                                #
+###############################################################################
+
+
+# # Keep the aggregate basicstats of each metric passing through.
+# [[aggregators.basicstats]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Configures which basic stats to push as fields
+# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+
+
+# # Calculates a derivative for every field.
+# [[aggregators.derivative]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+# ##
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ##
+# ## This aggregator will estimate a derivative for each field, which is
+# ## contained in both the first and last metric of the aggregation interval.
+# ## Without further configuration the derivative will be calculated with
+# ## respect to the time difference between these two measurements in seconds.
+# ## The formula applied for every field is:
+# ##
+# ##               value_last - value_first
+# ## derivative = --------------------------
+# ##              time_difference_in_seconds
+# ##
+# ## The resulting derivative will be named *fieldname_rate*. The suffix
+# ## "_rate" can be configured by the *suffix* parameter. When using a
+# ## derivation variable you can include its name for more clarity.
+# # suffix = "_rate"
+# ##
+# ## As an abstraction the derivative can be calculated not only by the time
+# ## difference but by the difference of a field, which is contained in the
+# ## measurement. This field is assumed to be monotonically increasing. This
+# ## feature is used by specifying a *variable*.
+# ## Make sure the specified variable is not filtered and exists in the metrics
+# ## passed to this aggregator!
+# # variable = ""
+# ##
+# ## When using a field as the derivation parameter the name of that field will
+# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
+# ##
+# ## Note that the calculation is based on the actual timestamp of the
+# ## measurements. When there is only one measurement during that period, the
+# ## measurement will be rolled over to the next period. The maximum number of
+# ## such roll-overs can be configured with a default of 10.
+# # max_roll_over = 10
+# ##
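+#
+# ## Illustration (not part of the config): if a field "requests" is 10 at the
+# ## start of a 30s period and 40 at its end, the aggregator emits
+# ## requests_rate = (40 - 10) / 30 = 1 (per second).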
+
+
+# # Report the final metric of a series
+# [[aggregators.final]]
+#   ## The period on which to flush & clear the aggregator.
+#   period = "30s"
+#   ## If true, the original metric will be dropped by the
+#   ## aggregator and will not get sent to the output plugins.
+#   drop_original = false
+#
+#   ## How long a series can go without updates before it is considered final.
+#   series_timeout = "5m"
+
+
+# # Create aggregate histograms.
+# [[aggregators.histogram]]
+#   ## The period in which to flush the aggregator.
+#   period = "30s"
+#
+#   ## If true, the original metric will be dropped by the
+#   ## aggregator and will not get sent to the output plugins.
+#   drop_original = false
+#
+#   ## If true, the histogram will be reset on flush instead
+#   ## of accumulating the results.
+#   reset = false
+#
+#   ## Whether bucket values should be accumulated. If set to false, the "gt" tag will be added.
+#   ## Defaults to true.
+#   cumulative = true
+#
+#   ## Example config that aggregates all fields of the metric.
+#   # [[aggregators.histogram.config]]
+#   #   ## Right borders of buckets (with +Inf implicitly added).
+#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
+#   #   ## The name of the metric.
+#   #   measurement_name = "cpu"
+#
+#   ## Example config that aggregates only specific fields of the metric.
+#   # [[aggregators.histogram.config]]
+#   #   ## Right borders of buckets (with +Inf implicitly added).
+#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+#   #   ## The name of the metric.
+#   #   measurement_name = "diskio"
+#   #   ## The concrete fields of the metric
+#   #   fields = ["io_time", "read_time", "write_time"]
+
+
+# # Merge metrics into multifield metrics by series key
+# [[aggregators.merge]]
+#   ## If true, the original metric will be dropped by the
+#   ## aggregator and will not get sent to the output plugins.
+#   drop_original = true
+
+
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+#   ## General Aggregator Arguments:
+#   ## The period on which to flush & clear the aggregator.
+#   period = "30s"
+#   ## If true, the original metric will be dropped by the
+#   ## aggregator and will not get sent to the output plugins.
+#   drop_original = false
+
+
+# # Keep the aggregate quantiles of each metric passing through.
+# [[aggregators.quantile]]
+#   ## General Aggregator Arguments:
+#   ## The period on which to flush & clear the aggregator.
+#   period = "30s"
+#
+#   ## If true, the original metric will be dropped by the
+#   ## aggregator and will not get sent to the output plugins.
+#   drop_original = false
+#
+#   ## Quantiles to output in the range [0,1]
+#   # quantiles = [0.25, 0.5, 0.75]
+#
+#   ## Type of aggregation algorithm
+#   ## Supported are:
+#   ##  "t-digest" -- approximation using centroids, can cope with a large number of samples
+#   ##  "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
+#   ##  "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
+#   ## NOTE: Do not use the "exact" algorithms with a large number of samples, as
+#   ## they can hurt performance and memory consumption!
+#   # algorithm = "t-digest"
+#
+#   ## Compression for approximation (t-digest). The value needs to be
+#   ## greater or equal to 1.0. Smaller values will result in better
+#   ## performance but less accuracy.
+#   # compression = 100.0
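+#   ##
+#   ## For example (an illustrative sketch): to track latency percentiles of a
+#   ## "response_time" field, one might set
+#   ##   quantiles = [0.5, 0.95, 0.99]
+#   ##   algorithm = "t-digest"
+#   ## which keeps memory bounded at high sample rates, at the cost of the
+#   ## reported quantiles being approximations rather than exact values.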
+
+
+# # Count the occurrence of values in fields.
+# [[aggregators.valuecounter]]
+#   ## General Aggregator Arguments:
+#   ## The period on which to flush & clear the aggregator.
+#   period = "30s"
+#   ## If true, the original metric will be dropped by the
+#   ## aggregator and will not get sent to the output plugins.
+#   drop_original = false
+#   ## The fields for which the values will be counted
+#   fields = []
+
+
+###############################################################################
+#                            INPUT PLUGINS                                    #
+###############################################################################
+
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+  ## Whether to report per-cpu stats or not
+  percpu = true
+  ## Whether to report total system cpu stats or not
+  totalcpu = true
+  ## If true, collect raw CPU time metrics
+  collect_cpu_time = false
+  ## If true, compute and report the sum of all non-idle CPU states
+  report_active = false
+
+
+# Read metrics about disk usage by mount point
+[[inputs.disk]]
+  ## By default stats will be gathered for all mount points.
+  ## Setting mount_points will restrict the stats to only the specified mount points.
+  # mount_points = ["/"]
+
+  ## Ignore mount points by filesystem type.
+  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
+
+
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+  ## By default, telegraf will gather stats for all devices including
+  ## disk partitions.
+  ## Setting devices will restrict the stats to the specified devices.
+  # devices = ["sda", "sdb", "vd*"]
+  ## Uncomment the following line if you need disk serial numbers.
+  # skip_serial_number = false
+  #
+  ## On systems which support it, device metadata can be added in the form of
+  ## tags.
+  ## Currently only Linux is supported via udev properties. You can view
+  ## available properties for a device by running:
+  ## 'udevadm info -q property -n /dev/sda'
+  ## Note: Most, but not all, udev properties can be accessed this way. Properties
+  ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
+  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+  #
+  ## Using the same metadata source as device_tags, you can also customize the
+  ## name of the device via templates.
+  ## The 'name_templates' parameter is a list of templates to try and apply to
+  ## the device. The template may contain variables in the form of '$PROPERTY' or
+  ## '${PROPERTY}'. The first template which does not contain any variables not
+  ## present for the device is used as the device name tag.
+  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+  ## the near-meaningless DM-0 name.
+  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
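+  #
+  ## For example (illustrative udev values): a device exposing
+  ## DM_VG_NAME="vg0" and DM_LV_NAME="home" but no ID_FS_LABEL would skip the
+  ## first template above and be reported with the device name tag "vg0/home".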
+
+
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+  # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+  # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+  # no configuration
+
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+  # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+  ## Uncomment to remove deprecated metrics.
+  # fielddrop = ["uptime_format"]
+
+
+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+#   ## ActiveMQ WebConsole URL
+#   url = "http://127.0.0.1:8161"
+#
+#   ## Required ActiveMQ Endpoint
+#   ##   deprecated in 1.11; use the url option
+#   # server = "127.0.0.1"
+#   # port = 8161
+#
+#   ## Credentials for basic HTTP authentication
+#   # username = "admin"
+#   # password = "admin"
+#
+#   ## Required ActiveMQ webadmin root path
+#   # webadmin = "admin"
+#
+#   ## Maximum time to receive response.
+#   # response_timeout = "5s"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+#   ## Aerospike servers to connect to (with port)
+#   ## This plugin will query all namespaces the aerospike
+#   ## server has configured and get stats for them.
+#   servers = ["localhost:3000"]
+#
+#   # username = "telegraf"
+#   # password = "pa$$word"
+#
+#   ## Optional TLS Config
+#   # enable_tls = false
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## If false, skip chain & host verification
+#   # insecure_skip_verify = true
+#
+#   # Feature Options
+#   # Add namespace variable to limit the namespaces executed on
+#   # Leave blank to do all
+#   # disable_query_namespaces = true # default false
+#   # namespaces = ["namespace1", "namespace2"]
+#
+#   # Enable set level telemetry
+#   # query_sets = true # default: false
+#   # Add namespace set combinations to limit sets executed on
+#   # Leave blank to do all sets
+#   # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+#
+#   # Histograms
+#   # enable_ttl_histogram = true # default: false
+#   # enable_object_size_linear_histogram = true # default: false
+#
+#   # By default, aerospike produces a 100 bucket histogram.
+#   # This is not great for most graphing tools, so this option allows
+#   # squashing it into a smaller number of buckets.
+#   # To have a balanced histogram, the number of buckets chosen
+#   # should divide evenly into 100.
+#   # num_histogram_buckets = 100 # default: 10
+
+
+# # Query statistics from AMD Graphics cards using rocm-smi binary
+# [[inputs.amd_rocm_smi]]
+#   ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
+#   # bin_path = "/opt/rocm/bin/rocm-smi"
+#
+#   ## Optional: timeout for GPU polling
+#   # timeout = "5s"
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+#   ## An array of URLs to gather from, must be directed at the machine
+#   ## readable version of the mod_status page including the auto query string.
+#   ## Default is "http://localhost/server-status?auto".
+#   urls = ["http://localhost/server-status?auto"]
+#
+#   ## Credentials for basic HTTP authentication.
+#   # username = "myuser"
+#   # password = "mypassword"
+#
+#   ## Maximum time to receive response.
+#   # response_timeout = "5s"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Monitor APC UPSes connected to apcupsd
+# [[inputs.apcupsd]]
+#   # A list of running apcupsd servers to connect to.
+#   # If not provided, will default to tcp://127.0.0.1:3551
+#   servers = ["tcp://127.0.0.1:3551"]
+#
+#   ## Timeout for dialing server.
+#   timeout = "5s"
+
+
+# # Gather metrics from Apache Aurora schedulers
+# [[inputs.aurora]]
+#   ## Schedulers are the base addresses of your Aurora Schedulers
+#   schedulers = ["http://127.0.0.1:8081"]
+#
+#   ## Set of role types to collect metrics from.
+#   ##
+#   ## The scheduler roles are checked each interval by contacting the
+#   ## scheduler nodes; zookeeper is not contacted.
+#   # roles = ["leader", "follower"]
+#
+#   ## Timeout is the max time for total network operations.
+#   # timeout = "5s"
+#
+#   ## Username and password are sent using HTTP Basic Auth.
+#   # username = "username"
+#   # password = "pa$$word"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Gather Azure Storage Queue metrics
+# [[inputs.azure_storage_queue]]
+#   ## Required Azure Storage Account name
+#   account_name = "mystorageaccount"
+#
+#   ## Required Azure Storage Account access key
+#   account_key = "storageaccountaccesskey"
+#
+#   ## Set to false to disable peeking age of oldest message (executes faster)
+#   # peek_oldest_message_age = true
+
+
+# # Collects Beanstalkd server and tubes stats
+# [[inputs.beanstalkd]]
+#   ## Server to collect data from
+#   server = "localhost:11300"
+#
+#   ## List of tubes to gather stats about.
+#   ## If no tubes are specified, then stats are gathered for each tube on the
+#   ## server reported by the list-tubes command
+#   tubes = ["notifications"]
+
+
+# # Read metrics exposed by Beat
+# [[inputs.beat]]
+#   ## A URL from which to read Beat-formatted JSON
+#   ## Default is "http://127.0.0.1:5066".
+#   url = "http://127.0.0.1:5066"
+#
+#   ## Enable collection of the listed stats
+#   ## An empty list means collect all. Available options are currently
+#   ## "beat", "libbeat", "system" and "filebeat".
+#   # include = ["beat", "libbeat", "filebeat"]
+#
+#   ## HTTP method
+#   # method = "GET"
+#
+#   ## Optional HTTP headers
+#   # headers = {"X-Special-Header" = "Special-Value"}
+#
+#   ## Override HTTP "Host" header
+#   # host_header = "logstash.example.com"
+#
+#   ## Timeout for HTTP requests
+#   # timeout = "5s"
+#
+#   ## Optional HTTP Basic Auth credentials
+#   # username = "username"
+#   # password = "pa$$word"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read BIND nameserver XML statistics
+# [[inputs.bind]]
+#   ## An array of BIND XML statistics URIs to gather stats.
+#   ## Default is "http://localhost:8053/xml/v3".
+#   # urls = ["http://localhost:8053/xml/v3"]
+#   # gather_memory_contexts = false
+#   # gather_views = false
+#
+#   ## Timeout for http requests made by bind nameserver
+#   # timeout = "4s"
+
+
+# # Collect bond interface status, slaves statuses and failures count
+# [[inputs.bond]]
+#   ## Sets 'proc' directory path
+#   ## If not specified, then default is /proc
+#   # host_proc = "/proc"
+#
+#   ## By default, telegraf gathers stats for all bond interfaces
+#   ## Setting interfaces will restrict the stats to the specified
+#   ## bond interfaces.
+#   # bond_interfaces = ["bond0"]
+
+
+# # Collect Kafka topics and consumers status from Burrow HTTP API.
+# [[inputs.burrow]]
+#   ## Burrow API endpoints in format "schema://host:port".
+#   ## Default is "http://localhost:8000".
+#   servers = ["http://localhost:8000"]
+#
+#   ## Override Burrow API prefix.
+#   ## Useful when Burrow is behind reverse-proxy.
+#   # api_prefix = "/v3/kafka"
+#
+#   ## Maximum time to receive response.
+#   # response_timeout = "5s"
+#
+#   ## Limit per-server concurrent connections.
+#   ## Useful in case of a large number of topics or consumer groups.
+#   # concurrent_connections = 20
+#
+#   ## Filter clusters, default is no filtering.
+#   ## Values can be specified as glob patterns.
+#   # clusters_include = []
+#   # clusters_exclude = []
+#
+#   ## Filter consumer groups, default is no filtering.
+#   ## Values can be specified as glob patterns.
+#   # groups_include = []
+#   # groups_exclude = []
+#
+#   ## Filter topics, default is no filtering.
+#   ## Values can be specified as glob patterns.
+#   # topics_include = []
+#   # topics_exclude = []
+#
+#   ## Credentials for basic HTTP authentication.
+#   # username = ""
+#   # password = ""
+#
+#   ## Optional SSL config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   # insecure_skip_verify = false
+
+
+# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
+# [[inputs.ceph]]
+#   ## This is the recommended interval to poll. Too frequent and you will lose
+#   ## data points due to timeouts during rebalancing and recovery
+#   interval = '1m'
+#
+#   ## All configuration values are optional, defaults are shown below
+#
+#   ## location of ceph binary
+#   ceph_binary = "/usr/bin/ceph"
+#
+#   ## directory in which to look for socket files
+#   socket_dir = "/var/run/ceph"
+#
+#   ## prefix of MON and OSD socket files, used to determine socket type
+#   mon_prefix = "ceph-mon"
+#   osd_prefix = "ceph-osd"
+#   mds_prefix = "ceph-mds"
+#   rgw_prefix = "ceph-client"
+#
+#   ## suffix used to identify socket files
+#   socket_suffix = "asok"
+#
+#   ## Ceph user to authenticate as, ceph will search for the corresponding keyring
+#   ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
+#   ## client section of ceph.conf for example:
+#   ##
+#   ##     [client.telegraf]
+#   ##         keyring = /etc/ceph/client.telegraf.keyring
+#   ##
+#   ## Consult the ceph documentation for more detail on keyring generation.
+#   ceph_user = "client.admin"
+#
+#   ## Ceph configuration to use to locate the cluster
+#   ceph_config = "/etc/ceph/ceph.conf"
+#
+#   ## Whether to gather statistics via the admin socket
+#   gather_admin_socket_stats = true
+#
+#   ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
+#   ## to be specified
+#   gather_cluster_stats = false
+
+
+# # Read specific statistics per cgroup
+# [[inputs.cgroup]]
+#   ## Directories in which to look for files, globs are supported.
+#   ## Consider restricting paths to the set of cgroups you really
+#   ## want to monitor if you have a large number of cgroups, to avoid
+#   ## any cardinality issues.
+#   # paths = [
+#   #   "/sys/fs/cgroup/memory",
+#   #   "/sys/fs/cgroup/memory/child1",
+#   #   "/sys/fs/cgroup/memory/child2/*",
+#   # ]
+#   ## cgroup stat fields, as file names, globs are supported.
+#   ## these file names are appended to each path from above.
+#   # files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+
+# # Get standard chrony metrics, requires chronyc executable.
+# [[inputs.chrony]]
+#   ## If true, chronyc tries to perform a DNS lookup for the time server.
+#   # dns_lookup = false
+
+
+# # Pull Metric Statistics from Amazon CloudWatch
+# [[inputs.cloudwatch]]
+#   ## Amazon Region
+#   region = "us-east-1"
+#
+#   ## Amazon Credentials
+#   ## Credentials are loaded in the following order
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
+#   # access_key = ""
+#   # secret_key = ""
+#   # token = ""
+#   # role_arn = ""
+#   # web_identity_token_file = ""
+#   # role_session_name = ""
+#   # profile = ""
+#   # shared_credential_file = ""
+#
+#   ## Endpoint to make request against, the correct endpoint is automatically
+#   ## determined and this option should only be set if you wish to override the
+#   ## default.
+#   ##   ex: endpoint_url = "http://localhost:8000"
+#   # endpoint_url = ""
+#
+#   ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+#   # http_proxy_url = "http://localhost:8888"
+#
+#   # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+#   # metrics are made available to the 1 minute period. Some are collected at
+#   # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+#   # Note that if a period is configured that is smaller than the minimum for a
+#   # particular metric, that metric will not be returned by the Cloudwatch API
+#   # and will not be collected by Telegraf.
+#   #
+#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
+#   period = "5m"
+#
+#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
+#   delay = "5m"
+#
+#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+#   ## gaps or overlap in pulled data
+#   interval = "5m"
+#
+#   ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored.
+#   ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours.
+#   ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain.
+#   ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
+#   ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
+#   # recently_active = "PT3H"
+#
+#   ## Configure the TTL for the internal cache of metrics.
+#   # cache_ttl = "1h"
+#
+#   ## Metric Statistic Namespaces (required)
+#   namespaces = ["AWS/ELB"]
+#   # A single metric statistic namespace that will be appended to namespaces on startup
+#   # namespace = "AWS/ELB"
+#
+#   ## Maximum requests per second. Note that the global default AWS rate limit is
+#   ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+#   ## maximum of 50.
+#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+#   # ratelimit = 25
+#
+#   ## Timeout for http requests made by the cloudwatch client.
+#   # timeout = "5s"
+#
+#   ## Namespace-wide statistic filters. These allow fewer queries to be made to
+#   ## cloudwatch.
+#   # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+#   # statistic_exclude = []
+#
+#   ## Metrics to Pull
+#   ## Defaults to all Metrics in Namespace if nothing is provided
+#   ## Refreshes Namespace available metrics every 1h
+#   # [[inputs.cloudwatch.metrics]]
+#   #   names = ["Latency", "RequestCount"]
+#   #
+#   #   ## Statistic filters for Metric. These allow for retrieving specific
+#   #   ## statistics for an individual metric.
+#   #   # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+#   #   # statistic_exclude = []
+#   #
+#   #   ## Dimension filters for Metric. All dimensions defined for the metric names
+#   #   ## must be specified in order to retrieve the metric statistics.
+#   #   ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
+#   #   [[inputs.cloudwatch.metrics.dimensions]]
+#   #     name = "LoadBalancerName"
+#   #     value = "p-example"
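+#
+#   ## For example (an illustrative sketch): running two cloudwatch instances,
+#   ## one with namespaces = ["AWS/ELB"] and one with namespaces = ["AWS/EC2"],
+#   ## and setting ratelimit = 25 in each keeps the combined request rate
+#   ## within the default 50 reqs/sec AWS limit mentioned above.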
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+#   ## Consul server address
+#   # address = "localhost:8500"
+#
+#   ## URI scheme for the Consul server, one of "http", "https"
+#   # scheme = "http"
+#
+#   ## Metric version controls the mapping from Consul metrics into
+#   ## Telegraf metrics.
+#   ##
+#   ##   example: metric_version = 1; deprecated in 1.15
+#   ##            metric_version = 2; recommended version
+#   # metric_version = 1
+#
+#   ## ACL token used in every request
+#   # token = ""
+#
+#   ## HTTP Basic Authentication username and password.
+#   # username = ""
+#   # password = ""
+#
+#   ## Data center to query the health checks from
+#   # datacenter = ""
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = true
+#
+#   ## Consul checks' tag splitting
+#   # When tags are formatted like "key:value" with ":" as a delimiter then
+#   # they will be split and reported as proper key:value pairs in Telegraf
+#   # tag_delimiter = ":"
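+#
+#   ## For example (illustrative): with tag_delimiter = ":", a check tagged
+#   ## "env:prod" is reported as the tag env="prod" rather than as a single
+#   ## opaque tag value.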
+
+
+# # Read per-node and per-bucket metrics from Couchbase
+# [[inputs.couchbase]]
+#   ## specify servers via a url matching:
+#   ##  [protocol://][:password]@address[:port]
+#   ##  e.g.
+#   ##    http://couchbase-0.example.com/
+#   ##    http://admin:secret@couchbase-0.example.com:8091/
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no protocol is specified, HTTP is used.
+#   ## If no port is specified, 8091 is used.
+#   servers = ["http://localhost:8091"]
+#
+#   ## Filter bucket fields to include only here.
+#   # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification (defaults to false)
+#   ## If set to false, tls_cert and tls_key are required
+#   # insecure_skip_verify = false
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+#   ## Works with CouchDB stats endpoints out of the box
+#   ## Multiple Hosts from which to read CouchDB stats:
+#   hosts = ["http://localhost:5984/_stats"]
+#
+#   ## Use HTTP Basic Authentication.
+#   # basic_username = "telegraf"
+#   # basic_password = "p@ssw0rd"
+
+
+# # Fetch metrics from a CSGO SRCDS
+# [[inputs.csgo]]
+#   ## Specify servers using the following format:
+#   ##   servers = [
+#   ##     ["ip1:port1", "rcon_password1"],
+#   ##     ["ip2:port2", "rcon_password2"],
+#   ##   ]
+#   #
+#   ## If no servers are specified, no data will be collected
+#   servers = []
+
+
+# # Input plugin for DC/OS metrics
+# [[inputs.dcos]]
+#   ## The DC/OS cluster URL.
+#   cluster_url = "https://dcos-ee-master-1"
+#
+#   ## The ID of the service account.
+#   service_account_id = "telegraf"
+#   ## The private key file for the service account.
+#   service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
+#
+#   ## Path containing login token. If set, will read on every gather.
+#   # token_file = "/home/dcos/.dcos/token"
+#
+#   ## In all filter options if both include and exclude are empty all items
+#   ## will be collected. Arrays may contain glob patterns.
+#   ##
+#   ## Node IDs to collect metrics from. If a node is excluded, no metrics will
+#   ## be collected for its containers or apps.
+#   # node_include = []
+#   # node_exclude = []
+#   ## Container IDs to collect container metrics from.
+#   # container_include = []
+#   # container_exclude = []
+#   ## Container IDs to collect app metrics from.
+#   # app_include = []
+#   # app_exclude = []
+#
+#   ## Maximum concurrent connections to the cluster.
+#   # max_connections = 10
+#   ## Maximum time to receive a response from cluster.
+#   # response_timeout = "20s"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## If false, skip chain & host verification
+#   # insecure_skip_verify = true
+#
+#   ## Recommended filtering to reduce series cardinality.
+#   # [inputs.dcos.tagdrop]
+#   #   path = ["/var/lib/mesos/slave/slaves/*"]
+
+
+# # Read metrics from one or many disque servers
+# [[inputs.disque]]
+#   ## An array of URI to gather stats about. Specify an ip or hostname
+#   ## with optional port and password.
+#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["localhost"]
+
+
+# # Provide a native collection for dmsetup based statistics for dm-cache
+# [[inputs.dmcache]]
+#   ## Whether to report per-device stats or not
+#   per_device = true
+
+
+# # Query the given DNS server and gather statistics
+# [[inputs.dns_query]]
+#   ## servers to query
+#   servers = ["8.8.8.8"]
+#
+#   ## Network is the network protocol name.
+#   # network = "udp"
+#
+#   ## Domains or subdomains to query.
+#   # domains = ["."]
+#
+#   ## Query record type.
+#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+#   # record_type = "A"
+#
+#   ## Dns server port.
+#   # port = 53
+#
+#   ## Query timeout in seconds.
+#   # timeout = 2
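+#
+#   ## For example (an illustrative sketch): to watch mail routing for a domain
+#   ## through a specific resolver, one might set
+#   ##   servers = ["8.8.8.8"]
+#   ##   domains = ["example.com"]
+#   ##   record_type = "MX"
+#   ## so that the query time is reported for each server/domain pair.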
+
+
+# # Read metrics about docker containers
+# [[inputs.docker]]
+#   ## Docker Endpoint
+#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
+#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
+#   endpoint = "unix:///var/run/docker.sock"
+#
+#   ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
+#   gather_services = false
+#
+#   ## Only collect metrics for these containers, collect all if empty
+#   container_names = []
+#
+#   ## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 chars
+#   source_tag = false
+#
+#   ## Containers to include and exclude. Globs accepted.
+#   ## Note that an empty array for both will include all containers
+#   container_name_include = []
+#   container_name_exclude = []
+#
+#   ## Container states to include and exclude. Globs accepted.
+#   ## When empty only containers in the "running" state will be captured.
+#   ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+#   ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+#   # container_state_include = []
+#   # container_state_exclude = []
+#
+#   ## Timeout for docker list, info, and stats commands
+#   timeout = "5s"
+#
+#   ## Whether to report for each container per-device blkio (8:0, 8:1...),
+#   ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+#   ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+#   ## Default value is 'true' for backwards compatibility; please set it to 'false' so that the
+#   ## 'perdevice_include' setting is honored.
+#   perdevice = true
+#
+#   ## Specifies for which classes a per-device metric should be issued
+#   ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+#   ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+#   # perdevice_include = ["cpu"]
+#
+#   ## Whether to report for each container total blkio and network stats or not.
+#   ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+#   ## Default value is 'false' for backwards compatibility; please set it to 'true' so that the
+#   ## 'total_include' setting is honored.
+#   total = false
+#
+#   ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+#   ## Possible values are 'cpu', 'blkio' and 'network'
+#   ## Total 'cpu' is reported directly by the Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+#   ## Please note that this setting has no effect if 'total' is set to 'false'
+#   # total_include = ["cpu", "blkio", "network"]
+#
+#   ## Which environment variables should we use as a tag
+#   # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+#
+#   ## docker labels to include and exclude as tags. Globs accepted.
+#   ## Note that an empty array for both will include all labels as tags
+#   docker_label_include = []
+#   docker_label_exclude = []
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
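+#
+#   ## For example (an illustrative sketch): to opt into the newer settings above
+#   ## and emit only per-CPU detail plus aggregate totals, one might set
+#   ##   perdevice = false
+#   ##   perdevice_include = ["cpu"]
+#   ##   total = true
+#   ##   total_include = ["cpu", "blkio", "network"]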
+
+
+# # Read statistics from one or many dovecot servers
+# [[inputs.dovecot]]
+#   ## specify dovecot servers via an address:port list
+#   ##   e.g.
+#   ##     localhost:24242
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["localhost:24242"]
+#
+#   ## Type is one of "user", "domain", "ip", or "global"
+#   type = "global"
+#
+#   ## Wildcard matches like "*.com". An empty string "" is the same as "*"
+#   ## If type = "ip", filters should be <IP/network>
+#   filters = [""]
+
+
+# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
+# [[inputs.ecs]]
+#   ## ECS metadata url.
+#   ## Metadata v2 API is used if set explicitly. Otherwise,
+#   ## v3 metadata endpoint API is used if available.
+#   # endpoint_url = ""
+#
+#   ## Containers to include and exclude. Globs accepted.
+#   ## Note that an empty array for both will include all containers
+#   # container_name_include = []
+#   # container_name_exclude = []
+#
+#   ## Container states to include and exclude. Globs accepted.
+#   ## When empty only containers in the "RUNNING" state will be captured.
+#   ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+#   ## "RESOURCES_PROVISIONED", "STOPPED".
+#   # container_status_include = []
+#   # container_status_exclude = []
+#
+#   ## ecs labels to include and exclude as tags. Globs accepted.
+#   ## Note that an empty array for both will include all labels as tags
+#   ecs_label_include = [ "com.amazonaws.ecs.*" ]
+#   ecs_label_exclude = []
+#
+#   ## Timeout for queries.
+#   # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+#   ## specify a list of one or more Elasticsearch servers
+#   ## you can add username and password to your url to use basic authentication:
+#   ## servers = ["http://user:pass@localhost:9200"]
+#   servers = ["http://localhost:9200"]
+#
+#   ## Timeout for HTTP requests to the elastic search server(s)
+#   http_timeout = "5s"
+#
+#   ## When local is true (the default), the node will read only its own stats.
+#   ## Set local to false when you want to read the node stats from all nodes
+#   ## of the cluster.
+#   local = true
+#
+#   ## Set cluster_health to true when you want to also obtain cluster health stats
+#   cluster_health = false
+#
+#   ## Adjust cluster_health_level when you want to also obtain detailed health stats
+#   ## The options are
+#   ##  - indices (default)
+#   ##  - cluster
+#   # cluster_health_level = "indices"
+#
+#   ## Set cluster_stats to true when you want to also obtain cluster stats.
+#   cluster_stats = false
+#
+#   ## Only gather cluster_stats from the master node. For this to work, 'local' must be set to true.
+#   cluster_stats_only_from_master = true
+#
+#   ## Indices to collect; can be one or more indices names or _all
+#   ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
+#   indices_include = ["_all"]
+#
+#   ## One of "shards", "cluster", "indices"
+#   indices_level = "shards"
+#
+#   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+#   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+#   ## "breaker". By default, all stats are gathered.
+#   # node_stats = ["jvm", "http"]
+#
+#   ## HTTP Basic Authentication username and password.
+#   # username = ""
+#   # password = ""
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+#   ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all
+#   ## indices that match it, and sort them by the date or number after the wildcard. Metrics are then gathered
+#   ## for only the 'num_most_recent_indices' most recent indices.
+#   # num_most_recent_indices = 0
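+#
+#   ## For example (illustrative index names): with daily indices such as
+#   ## "telegraf-2021.10.01", an entry indices_include = ["telegraf-*"]
+#   ## combined with num_most_recent_indices = 2 gathers metrics for only the
+#   ## two most recent matching indices.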
+
+
+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+#   ## The full HTTP endpoint URL for your Elasticsearch instance
+#   ## Multiple urls can be specified as part of the same cluster;
+#   ## this means that only ONE of the urls will be queried each interval.
+#   urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+#   ## Elasticsearch client timeout, defaults to "5s".
+#   # timeout = "5s"
+#
+#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+#   ## thus it is not necessary to list all nodes in the urls config option
+#   # enable_sniffer = false
+#
+#   ## Set the interval to check if the Elasticsearch nodes are available
+#   ## This option is only used if enable_sniffer is also set (0s to disable it)
+#   # health_check_interval = "10s"
+#
+#   ## HTTP basic authentication details (e.g. when using x-pack)
+#   # username = "telegraf"
+#   # password = "mypassword"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   [[inputs.elasticsearch_query.aggregation]]
+#     ## measurement name for the results of the aggregation query
+#     measurement_name = "measurement"
+#
+#     ## Elasticsearch indexes to query (accept wildcards).
+#     index = "index-*"
+#
+#     ## The date/time field in the Elasticsearch index (mandatory).
+#     date_field = "@timestamp"
+#
+#     ## If the field used for the date/time field in Elasticsearch is also using
+#     ## a custom date/time format it may be required to provide the format to
+#     ## correctly parse the field.
+#     ##
+#     ## If using one of the built in elasticsearch formats this is not required.
+#     # date_field_custom_format = ""
+#
+#     ## Time window to query (eg. "1m" to query documents from last minute).
+#     ## Normally should be set to same as collection interval
+#     query_period = "1m"
+#
+#     ## Lucene query to filter results
+#     # filter_query = "*"
+#
+#     ## Fields to aggregate values (must be numeric fields)
+#     # metric_fields = ["metric"]
+#
+#     ## Aggregation function to use on the metric fields
+#     ## Must be set if 'metric_fields' is set
+#     ## Valid values are: avg, sum, min, max
+#     # metric_function = "avg"
+#
+#     ## Fields to be used as tags
+#     ## Must be text, non-analyzed fields. Metric aggregations are performed per tag
+#     # tags = ["field.keyword", "field2.keyword"]
+#
+#     ## Set to true to not ignore documents when the tag(s) above are missing
+#     # include_missing_tag = false
+#
+#     ## String value of the tag when the tag does not exist
+#     ## Used when include_missing_tag is true
+#     # missing_tag_value = "null"
+
+
+# # Returns ethtool statistics for given interfaces
+# [[inputs.ethtool]]
+#   ## List of interfaces to pull metrics for
+#   # interface_include = ["eth0"]
+#
+#   ## List of interfaces to ignore when pulling metrics.
+#   # interface_exclude = ["eth1"]
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+#   ## Commands array
+#   commands = [
+#     "/tmp/test.sh",
+#     "/usr/bin/mycollector --foo=bar",
+#     "/tmp/collect_*.sh"
+#   ]
+#
+#   ## Timeout for each command to complete.
+#   timeout = "5s"
+#
+#   ## measurement name suffix (for separating different commands)
+#   name_suffix = "_mycollector"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
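+#
+#   ## For example (an illustrative sketch): a script such as /tmp/test.sh that
+#   ## prints InfluxDB line protocol,
+#   ##   #!/bin/sh
+#   ##   echo "example_metric,source=script value=42"
+#   ## is run once per interval and its output parsed via data_format = "influx".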
+# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# use_sudo = false + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Parse a complete file each interval +# [[inputs.file]] +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. +# files = ["/tmp/metrics.out"] +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. +# # file_tag = "" +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option +# # directory = "/var/cache/apt/archives" +# +# ## Directories to gather stats about. +# ## This accept standard unit glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# +# ## Only count files that are at least this size. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# ## Files to gather stats about. 
+#   ## These accept standard unix glob matching rules, but with the addition of
+#   ## ** as a "super asterisk". ie:
+#   ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
+#   ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
+#   ##   "/var/log/apache.log" -> just tail the apache log file
+#   ##
+#   ## See https://github.com/gobwas/glob for more examples
+#   ##
+#   files = ["/var/log/**.log"]
+#
+#   ## If true, read the entire file and calculate an md5 checksum.
+#   md5 = false
+
+
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+#   ## Specify auth token for your account
+#   auth_token = "invalidAuthToken"
+#   ## You can override the fireboard server URL if necessary
+#   # url = "https://fireboard.io/api/v1/devices.json"
+#   ## You can set a different http_timeout if you need to
+#   ## You should set a string using a number and time indicator
+#   ## for example "12s" for 12 seconds.
+#   # http_timeout = "4s"
+
+
+# # Read metrics exposed by fluentd in_monitor plugin
+# [[inputs.fluentd]]
+#   ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
+#   ##
+#   ## Endpoint:
+#   ## - only one URI is allowed
+#   ## - https is not supported
+#   endpoint = "http://localhost:24220/api/plugins.json"
+#
+#   ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
+#   exclude = [
+#     "monitor_agent",
+#     "dummy",
+#   ]
+
+
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+#   ## List of repositories to monitor.
+#   repositories = [
+#     "influxdata/telegraf",
+#     "influxdata/influxdb"
+#   ]
+#
+#   ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+#   # access_token = ""
+#
+#   ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+#   # enterprise_base_url = ""
+#
+#   ## Timeout for HTTP requests.
+#   # http_timeout = "5s"
+#
+#   ## List of additional fields to query.
+#   ## NOTE: Getting those fields might involve issuing additional API-calls, so please
+#   ##       make sure you do not exceed the rate-limit of GitHub.
+#   ##
+#   ## Available fields are:
+#   ##   - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
+#   # additional_fields = []
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+#   ## API endpoint, currently supported API:
+#   ##
+#   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
+#   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
+#   ##
+#   ## For namespace endpoint, the metrics array will be ignored for that call.
+#   ## Endpoint can contain namespace and multiple type calls.
+#   ##
+#   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
+#   ## of endpoints
+#   servers = [
+#     "http://[graylog-server-ip]:12900/system/metrics/multiple",
+#   ]
+#
+#   ## Metrics list
+#   ## List of metrics can be found on Graylog webservice documentation.
+#   ## Or by hitting the web service api at:
+#   ##   http://[graylog-host]:12900/system/metrics
+#   metrics = [
+#     "jvm.cl.loaded",
+#     "jvm.memory.pools.Metaspace.committed"
+#   ]
+#
+#   ## Username and password
+#   username = ""
+#   password = ""
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+#   ## Make sure you specify the complete path to the stats endpoint
+#   ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+#   ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+#   ## Credentials for basic HTTP authentication
+#   # username = "admin"
+#   # password = "admin"
+#
+#   ## You can also use local socket with standard wildcard globbing.
+#   ## Server address not starting with 'http' will be treated as a possible
+#   ## socket, so both examples below are valid.
+#   # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+#   ## By default, some of the fields are renamed from what haproxy calls them.
+#   ## Setting this option to true results in the plugin keeping the original
+#   ## field names.
+#   # keep_field_names = false
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+#   ## By default, telegraf gathers temps data from all disks detected by the
+#   ## hddtemp.
+#   ##
+#   ## Only collect temps from the selected disks.
+#   ##
+#   ## A * as the device name will return the temperature values of all disks.
+#   ##
+#   # address = "127.0.0.1:7634"
+#   # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+#   ## One or more URLs from which to read formatted metrics
+#   urls = [
+#     "http://localhost/metrics"
+#   ]
+#
+#   ## HTTP method
+#   # method = "GET"
+#
+#   ## Optional HTTP headers
+#   # headers = {"X-Special-Header" = "Special-Value"}
+#
+#   ## Optional file with Bearer token
+#   ## file content is added as an Authorization header
+#   # bearer_token = "/path/to/file"
+#
+#   ## Optional HTTP Basic Auth Credentials
+#   # username = "username"
+#   # password = "pa$$word"
+#
+#   ## HTTP entity-body to send with POST/PUT requests.
+#   # body = ""
+#
+#   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+#   ## compress body or "identity" to apply no encoding.
+#   # content_encoding = "identity"
+#
+#   ## HTTP Proxy support
+#   # http_proxy_url = ""
+#
+#   ## OAuth2 Client Credentials Grant
+#   # client_id = "clientid"
+#   # client_secret = "secret"
+#   # token_url = "https://identityprovider/oauth2/v1/token"
+#   # scopes = ["urn:opc:idm:__myscopes__"]
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Optional Cookie authentication
+#   # cookie_auth_url = "https://localhost/authMe"
+#   # cookie_auth_method = "POST"
+#   # cookie_auth_username = "username"
+#   # cookie_auth_password = "pa$$word"
+#   # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+#   ## If cookie_auth_renewal is unset or set to "0", auth occurs once and the cookie is never renewed
+#   # cookie_auth_renewal = "5m"
+#
+#   ## Amount of time allowed to complete the HTTP request
+#   # timeout = "5s"
+#
+#   ## List of success status codes
+#   # success_status_codes = [200]
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address a method and a timeout
+# [[inputs.http_response]]
+#   ## Deprecated in 1.12, use 'urls'
+#   ## Server address (default http://localhost)
+#   # address = "http://localhost"
+#
+#   ## List of urls to query.
+#   # urls = ["http://localhost"]
+#
+#   ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+#   # http_proxy = "http://localhost:8888"
+#
+#   ## Set response_timeout (default 5 seconds)
+#   # response_timeout = "5s"
+#
+#   ## HTTP Request Method
+#   # method = "GET"
+#
+#   ## Whether to follow redirects from the server (defaults to false)
+#   # follow_redirects = false
+#
+#   ## Optional file with Bearer token
+#   ## file content is added as an Authorization header
+#   # bearer_token = "/path/to/file"
+#
+#   ## Optional HTTP Basic Auth Credentials
+#   # username = "username"
+#   # password = "pa$$word"
+#
+#   ## Optional HTTP Request Body
+#   # body = '''
+#   # {'fake':'data'}
+#   # '''
+#
+#   ## Optional name of the field that will contain the body of the response.
+#   ## By default it is set to an empty String indicating that the body's content won't be added
+#   # response_body_field = ''
+#
+#   ## Maximum allowed HTTP response body size in bytes.
+#   ## 0 means to use the default of 32MiB.
+#   ## If the response body size exceeds this limit a "body_read_error" will be raised
+#   # response_body_max_size = "32MiB"
+#
+#   ## Optional substring or regex match in body of the response (case sensitive)
+#   # response_string_match = "\"service_status\": \"up\""
+#   # response_string_match = "ok"
+#   # response_string_match = "\".*_status\".?:.?\"up\""
+#
+#   ## Expected response status code.
+#   ## The status code of the response is compared to this value. If they match, the field
+#   ## "response_status_code_match" will be 1, otherwise it will be 0. If the
+#   ## expected status code is 0, the check is disabled and the field won't be added.
+#   # response_status_code = 0
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## HTTP Request Headers (all values must be strings)
+#   # [inputs.http_response.headers]
+#   #   Host = "github.com"
+#
+#   ## Optional setting to map response http headers into tags
+#   ## If the http header is not present on the request, no corresponding tag will be added
+#   ## If multiple instances of the http header are present, only the first value will be used
+#   # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+#   ## Interface to use when dialing an address
+#   # interface = "eth0"
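+#
+#   ## For example (an illustrative sketch): to verify that a health endpoint
+#   ## returns 200 and reports itself up, one might set
+#   ##   urls = ["https://example.com/health"]
+#   ##   response_status_code = 200
+#   ##   response_string_match = "\"status\": \"ok\""
+#   ## and alert whenever response_status_code_match is 0.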
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+#   ## NOTE: This plugin only reads numerical measurements; strings and booleans
+#   ## will be ignored.
+#
+#   ## Name for the service being polled. Will be appended to the name of the
+#   ## measurement e.g. httpjson_webserver_stats
+#   ##
+#   ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
+#   name = "webserver_stats"
+#
+#   ## URL of each server in the service's cluster
+#   servers = [
+#     "http://localhost:9999/stats/",
+#     "http://localhost:9998/stats/",
+#   ]
+#   ## Set response_timeout (default 5 seconds)
+#   response_timeout = "5s"
+#
+#   ## HTTP method to use: GET or POST (case-sensitive)
+#   method = "GET"
+#
+#   ## List of tag names to extract from top-level of JSON server response
+#   # tag_keys = [
+#   #   "my_tag_1",
+#   #   "my_tag_2"
+#   # ]
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## HTTP parameters (all values must be strings). For "GET" requests, data
+#   ## will be included in the query. For "POST" requests, data will be included
+#   ## in the request body as "x-www-form-urlencoded".
+#   # [inputs.httpjson.parameters]
+#   #   event_type = "cpu_spike"
+#   #   threshold = "0.75"
+#
+#   ## HTTP Headers (all values must be strings)
+#   # [inputs.httpjson.headers]
+#   #   X-Auth-Token = "my-xauth-token"
+#   #   apiVersion = "v1"
+
+
+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+#   ## Required Icinga2 server address
+#   # server = "https://localhost:5665"
+#
+#   ## Required Icinga2 object type ("services" or "hosts")
+#   # object_type = "services"
+#
+#   ## Credentials for basic HTTP authentication
+#   # username = "admin"
+#   # password = "admin"
+#
+#   ## Maximum time to receive response.
+#   # response_timeout = "5s"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = true
+
+
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+#   # no configuration
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+#   ## Works with InfluxDB debug endpoints out of the box,
+#   ## but other services can use this format too.
+#   ## See the influxdb plugin's README for more details.
+#
+#   ## Multiple URLs from which to read InfluxDB-formatted JSON
+#   ## Default is "http://localhost:8086/debug/vars".
+#   urls = [
+#     "http://localhost:8086/debug/vars"
+#   ]
+#
+#   ## Username and password to send using HTTP Basic Authentication.
+# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true + + +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# +# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. +# # [inputs.interrupts.tagdrop] +# # irq = [ "NET_RX", "TASKLET" ] + + +# # Read metrics from the bare metal servers via IPMI +# [[inputs.ipmi_sensor]] +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## +# ## optionally specify one or more servers via a url matching +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# +# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid +# ## gaps or overlap in pulled data +# interval = "30s" +# +# ## Timeout for the ipmitool command to complete +# timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 +# +# ## Optionally provide the hex key for the IMPI connection. +# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. 
+#   include_unmatched_sets = false
+#   ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+#   use_sudo = false
+#   ## The default timeout of 1s for ipset execution can be overridden here:
+#   # timeout = "1s"
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+#   ## The Jenkins URL in the format "schema://host:port"
+#   url = "http://my-jenkins-instance:8080"
+#   # username = "admin"
+#   # password = "admin"
+#
+#   ## Set response_timeout
+#   response_timeout = "5s"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Optional Max Job Build Age filter
+#   ## Default 1 hour, ignore builds older than max_build_age
+#   # max_build_age = "1h"
+#
+#   ## Optional Sub Job Depth filter
+#   ## Jenkins can have unlimited layers of sub jobs
+#   ## This config will limit the layers of pulling; the default value 0 means
+#   ## unlimited pulling until there are no more sub jobs
+#   # max_subjob_depth = 0
+#
+#   ## Optional Sub Job Per Layer
+#   ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+#   ## This config will limit the calls to only the latest branches in each layer;
+#   ## if empty, the default value 10 is used
+#   # max_subjob_per_layer = 10
+#
+#   ## Jobs to include or exclude from gathering
+#   ## When using both lists, job_exclude has priority.
+#   ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+#   # job_include = [ "*" ]
+#   # job_exclude = [ ]
+#
+#   ## Nodes to include or exclude from gathering
+#   ## When using both lists, node_exclude has priority.
+#   # node_include = [ "*" ]
+#   # node_exclude = [ ]
+#
+#   ## Worker pool for jenkins plugin only
+#   ## If this field is empty, the default value 5 is used
+#   # max_connections = 5
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+#   # DEPRECATED: the jolokia plugin has been deprecated in favor of the
+#   # jolokia2 plugin
+#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+#   ## This is the context root used to compose the jolokia url
+#   ## NOTE that Jolokia requires a trailing slash at the end of the context root
+#   ## NOTE that your jolokia security policy must allow for POST requests.
+#   context = "/jolokia/"
+#
+#   ## This specifies the mode used
+#   # mode = "proxy"
+#   #
+#   ## When in proxy mode this section is used to specify further
+#   ## proxy address configurations.
+#   ## Remember to change host address to fit your environment.
+#   # [inputs.jolokia.proxy]
+#   #   host = "127.0.0.1"
+#   #   port = "8080"
+#
+#   ## Optional http timeouts
+#   ##
+#   ## response_header_timeout, if non-zero, specifies the amount of time to wait
+#   ## for a server's response headers after fully writing the request.
+#   # response_header_timeout = "3s"
+#   ##
+#   ## client_timeout specifies a time limit for requests made by this client.
+#   ## Includes connection time, any redirects, and reading the response body.
+#   # client_timeout = "4s"
+#
+#   ## Attribute delimiter
+#   ##
+#   ## When multiple attributes are returned for a single
+#   ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+#   ## name, and the attribute name, separated by the given delimiter.
+#   # delimiter = "_"
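+#
+#   ## For example (illustrative): with delimiter = "_", the "thread_count"
+#   ## metric below reading the attributes "ThreadCount" and "DaemonThreadCount"
+#   ## produces the fields "thread_count_ThreadCount" and
+#   ## "thread_count_DaemonThreadCount".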
+# # delimiter = "_"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists of a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects class loaded/unloaded count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agent URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read JMX metrics from a Jolokia REST proxy endpoint
+# [[inputs.jolokia2_proxy]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## Specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes api
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave blank to try to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
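+# ## For example (illustrative, not the stock default): label_include = ["app"]
+# ## with an empty label_exclude keeps only the "app" label as a tag.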
+# # label_include = []
+# # label_exclude = ["*"]
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4020"]
+
+
+# # Provides Linux sysctl fs metrics
+# [[inputs.linux_sysctl_fs]]
+# # no configuration
+
+
+# # Read metrics exposed by Logstash
+# [[inputs.logstash]]
+# ## The URL of the exposed Logstash API endpoint.
+# url = "http://127.0.0.1:9600"
+#
+# ## Use Logstash 5 single pipeline API, set to true when monitoring
+# ## Logstash 5.
+# # single_pipeline = false
+#
+# ## Enable optional collection components. Can contain
+# ## "pipelines", "process", and "jvm".
+# # collect = ["pipelines", "process", "jvm"]
+#
+# ## Timeout for HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification.
+# # insecure_skip_verify = false
+#
+# ## Optional HTTP headers.
+# # [inputs.logstash.headers]
+# # "X-Special-Header" = "Special-Value"
+
+
+# # Read metrics about LVM physical volumes, volume groups, logical volumes.
+# [[inputs.lvm]]
+# ## Use sudo to run LVM commands
+# use_sudo = false
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+# ## MailChimp API key
+# ## get from https://admin.mailchimp.com/account/api/
+# api_key = "" # required
+# ## Reports for campaigns sent more than days_old ago will not be collected.
+# ## 0 means collect all.
+# days_old = 0
+# ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
+# # campaign_id = ""
+
+
+# # Retrieves information on a specific host in a MarkLogic Cluster
+# [[inputs.marklogic]]
+# ## Base URL of the MarkLogic HTTP Server.
+# url = "http://localhost:8002"
+#
+# ## List of specific hostnames to retrieve information. At least one (1) is required.
+# # hosts = ["hostname1", "hostname2"]
+#
+# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many mcrouter servers
+# [[inputs.mcrouter]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with port. e.g. tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
+# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s"
+
+
+# # Read metrics from one or many memcached servers
+# [[inputs.memcached]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. e.g. localhost, 10.0.0.1:11211, etc.
+# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. 
+# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017"] +# +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). 
+# # gather_top_stat = false
+#
+# ## List of db where collections stats are collected
+# ## If empty, all databases are included
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Aggregates the contents of multiple files into a single point
+# [[inputs.multifile]]
+# ## Base directory where telegraf will look for files.
+# ## Omit this option to use absolute paths.
+# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+#
+# ## If true, Telegraf discards all data when a single file can't be read.
+# ## Otherwise, Telegraf omits the field generated from this file.
+# # fail_early = true
+#
+# ## Files to parse each interval.
+# [[inputs.multifile.file]]
+# file = "in_pressure_input"
+# dest = "pressure"
+# conversion = "float"
+# [[inputs.multifile.file]]
+# file = "in_temp_input"
+# dest = "temperature"
+# conversion = "float(3)"
+# [[inputs.multifile.file]]
+# file = "in_humidityrelative_input"
+# dest = "humidityrelative"
+# conversion = "float(3)"
+
+
+# # Read metrics from one or many mysql servers
+# [[inputs.mysql]]
+# ## specify servers via a url matching:
+# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
+# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+# ## e.g.
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
+# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
+# #
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["tcp(127.0.0.1:3306)/"]
+#
+# ## Selects the metric output format.
+# ##
+# ## This option exists to maintain backwards compatibility; if you have
+# ## existing metrics, do not set or change this value until you are ready to
+# ## migrate to the new format.
+# ##
+# ## If you do not have existing metrics from this plugin, set to the latest
+# ## version.
+# ##
+# ## Telegraf >=1.6: metric_version = 2
+# ## <1.6: metric_version = 1 (or unset)
+# metric_version = 2
+#
+# ## if the list is empty, then metrics are gathered from all database tables
+# # table_schema_databases = []
+#
+# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases provided in the list above
+# # gather_table_schema = false
+#
+# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+# # gather_process_list = false
+#
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+# # gather_user_statistics = false
+#
+# ## gather auto_increment columns and max values from information schema
+# # gather_info_schema_auto_inc = false
+#
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# # gather_innodb_metrics = false
+#
+# ## gather metrics from SHOW SLAVE STATUS command output
+# # gather_slave_status = false
+#
+# ## gather metrics from all channels from SHOW SLAVE STATUS command output
+# # gather_all_slave_channels = false
+#
+# ## use MariaDB dialect for all channels SHOW SLAVE STATUS
+# # mariadb_dialect = false
+#
+# ## gather metrics from SHOW BINARY LOGS command output
+# # gather_binary_logs = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# # gather_global_variables = true
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+# # gather_table_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+# # gather_table_lock_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+# # gather_index_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+# # gather_event_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+# # gather_file_events_stats = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+#
+# ## the limits for metrics from perf_events_statements
+# # perf_events_statements_digest_text_limit = 120
+# # perf_events_statements_limit = 250
+# # perf_events_statements_time_limit = 86400
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
+# # gather_perf_sum_per_acc_per_event = false
+#
+# ## list of events to be gathered for gather_perf_sum_per_acc_per_event
+# ## if the list is empty, all events will be gathered
+# # perf_summary_events = []
+#
+# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
+# ## example: interval_slow = "30m"
+# # interval_slow = ""
+#
+# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Provides metrics about the state of a NATS server
+# [[inputs.nats]]
+# ## The address of the monitoring endpoint of the NATS server
+# server = "http://localhost:8222"
+#
+# ## Maximum time to receive response
+# # response_timeout = "5s"
+
+
+# # Neptune Apex data collector
+# [[inputs.neptune_apex]]
+# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
+# ## Measurements will be logged under "apex".
+#
+# ## The base URL of the local Apex(es). If you specify more than one server, they will
+# ## be differentiated by the "source" tag.
+# servers = [
+# "http://apex.local",
+# ]
+#
+# ## The response_timeout specifies how long to wait for a reply from the Apex.
+# #response_timeout = "5s"
+
+
+# # Read metrics about network interface usage
+# [[inputs.net]]
+# ## By default, telegraf gathers stats from any up interface (excluding loopback)
+# ## Setting interfaces will tell it to gather these explicit interfaces,
+# ## regardless of status.
+# ##
+# # interfaces = ["eth0"]
+# ##
+# ## On linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+# # no configuration
+
+
+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# # An array of Nginx stub_status URI to gather stats.
+# urls = ["http://localhost/server_status"]
+#
+# ## Optional TLS Config
+# tls_ca = "/etc/telegraf/ca.pem"
+# tls_cert = "/etc/telegraf/cert.cer"
+# tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Read Nginx Plus' full status information (ngx_http_status_module)
+# [[inputs.nginx_plus]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx Plus API status information
+# [[inputs.nginx_plus_api]]
+# ## An array of API URI to gather stats.
+# urls = ["http://localhost/api"]
+#
+# # Nginx API version, default: 3
+# # api_version = 3
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-sts)
+# [[inputs.nginx_sts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
+# [[inputs.nginx_upstream_check]]
+# ## A URL where the Nginx upstream check module is enabled
+# ## It should be set to return a JSON formatted response
+# url = "http://127.0.0.1/status?format=json"
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Override HTTP "Host" header
+# # host_header = "check.example.com"
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-vts)
+# [[inputs.nginx_vts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # A plugin to collect stats from the NSD authoritative DNS name server
+# [[inputs.nsd]]
+# ## Address of server to connect to, optionally ':port'. Defaults to the
+# ## address in the nsd config file.
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the nsd-control binary can be overridden with:
+# # binary = "/usr/sbin/nsd-control"
+#
+# ## The default location of the nsd config file can be overridden with:
+# # config_file = "/etc/nsd/nsd.conf"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+
+
+# # Read NSQ topic and channel statistics.
+# [[inputs.nsq]]
+# ## An array of NSQD HTTP API endpoints
+# endpoints = ["http://localhost:4151"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Collect kernel snmp counters and network interface statistics
+# [[inputs.nstat]]
+# ## file paths for proc files. If empty default paths will be used:
+# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
+# ## These can also be overridden with env variables, see README.
+# proc_net_netstat = "/proc/net/netstat"
+# proc_net_snmp = "/proc/net/snmp"
+# proc_net_snmp6 = "/proc/net/snmp6"
+# ## dump metrics with 0 values too
+# dump_zeros = true
+
+
+# # Get standard NTP query metrics, requires ntpq executable.
+# [[inputs.ntpq]]
+# ## If false, set the -n ntpq flag, which skips DNS lookups and can reduce
+# ## metric gather time.
+# dns_lookup = true
+
+
+# # Pulls statistics from nvidia GPUs attached to the host
+# [[inputs.nvidia_smi]]
+# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/usr/bin/nvidia-smi"
+#
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
+
+
+# # Retrieve data from OPCUA devices
+# [[inputs.opcua]]
+# ## Metric name
+# # name = "opcua"
+# #
+# ## OPC UA Endpoint URL
+# # endpoint = "opc.tcp://localhost:4840"
+# #
+# ## Maximum time allowed to establish a connection to the endpoint.
+# # connect_timeout = "10s"
+# #
+# ## Maximum time allowed for a request over the established connection.
+# # request_timeout = "5s"
+# #
+# ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
+# ## "Basic256Sha256", or "auto"
+# # security_policy = "auto"
+# #
+# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
+# # security_mode = "auto"
+# #
+# ## Path to cert.pem. Required when security mode or policy isn't "None".
+# ## If cert path is not supplied, self-signed cert and key will be generated.
+# # certificate = "/etc/telegraf/cert.pem"
+# #
+# ## Path to private key.pem. Required when security mode or policy isn't "None".
+# ## If key path is not supplied, self-signed cert and key will be generated.
+# # private_key = "/etc/telegraf/key.pem"
+# #
+# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+# # auth_method = "Anonymous"
+# #
+# ## Username. Required for auth_method = "UserName"
+# # username = ""
+# #
+# ## Password. Required for auth_method = "UserName"
+# # password = ""
+# #
+# ## Option to select the metric timestamp to use. Valid options are:
+# ## "gather" -- uses the time of receiving the data in telegraf
+# ## "server" -- uses the timestamp provided by the server
+# ## "source" -- uses the timestamp provided by the source
+# # timestamp = "gather"
+# #
+# ## Node ID configuration
+# ## name - field name to use in the output
+# ## namespace - OPC UA namespace of the node (integer value 0 through 3)
+# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
+# ## identifier - OPC UA ID (tag as shown in opcua browser)
+# ## Example:
+# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"}
+# # nodes = [
+# # {name="", namespace="", identifier_type="", identifier=""},
+# # {name="", namespace="", identifier_type="", identifier=""},
+# #]
+# #
+# ## Node Group
+# ## Sets defaults for OPC UA namespace and ID type so they aren't required in
+# ## every node. A group can also have a metric name that overrides the main
+# ## plugin metric name.
+# ##
+# ## Multiple node groups are allowed
+# #[[inputs.opcua.group]]
+# ## Group Metric name. Overrides the top level name. If unset, the
+# ## top level name is used.
+# # name =
+# #
+# ## Group default namespace. If a node in the group doesn't set its
+# ## namespace, this is used.
+# # namespace =
+# #
+# ## Group default identifier type. If a node in the group doesn't set its
+# ## identifier type, this is used.
+# # identifier_type =
+# #
+# ## Node ID Configuration. Array of nodes with the same settings as above.
+# # nodes = [
+# # {name="", namespace="", identifier_type="", identifier=""},
+# # {name="", namespace="", identifier_type="", identifier=""},
+# #]
+
+
+# # OpenLDAP cn=Monitor plugin
+# [[inputs.openldap]]
+# host = "localhost"
+# port = 389
+#
+# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
+# # note that port will likely need to be changed to 636 for ldaps
+# # valid options: "" | "starttls" | "ldaps"
+# tls = ""
+#
+# # skip peer certificate verification. Default is false.
+# insecure_skip_verify = false
+#
+# # Path to PEM-encoded Root certificate to use to verify server certificate
+# tls_ca = "/etc/ssl/certs.pem"
+#
+# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
+# bind_dn = ""
+# bind_password = ""
+#
+# # Reverse metric names so they sort more naturally. Recommended.
+# # This defaults to false if unset, but is set to true when generating a new config
+# reverse_metric_names = true
+
+
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
+# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
+# timeout = 1000
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City IDs to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"]
+#
+# ## OpenWeatherMap base URL
+# # base_url = "https://api.openweathermap.org/"
+#
+# ## Timeout for HTTP response.
+# # response_timeout = "5s"
+#
+# ## Preferred unit system for temperature and wind speed. Can be one of
+# ## "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap updates their weather data every 10
+# ## minutes.
+# interval = "10m"
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+# ## Path of passenger-status.
+# ##
+# ## The plugin gathers metrics by parsing the XML output of passenger-status.
+# ## More information about the tool:
+# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+# ##
+# ## If no path is specified, then the plugin simply executes passenger-status,
+# ## hoping it can be found in your PATH
+# command = "passenger-status -v --show=xml"
+
+
+# # Gather counters from PF
+# [[inputs.pf]]
+# ## PF requires root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
+# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
+# ## pfctl can be restricted to only the list command "pfctl -s info".
+# use_sudo = false
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port and path
+# ##
+# ## Plugin can be configured in three modes (any one can be used):
+# ## - http: the URL must start with http:// or https://, e.g.:
+# ## "http://localhost/status"
+# ## "http://192.168.130.1/status?full"
+# ##
+# ## - unixsocket: path to fpm socket, e.g.:
+# ## "/var/run/php5-fpm.sock"
+# ## or using a custom fpm status path:
+# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
+# ##
+# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, e.g.:
+# ## "fcgi://10.0.0.12:9000/status"
+# ## "cgi://10.0.10.12:9001/status"
+# ##
+# ## Example of gathering from a local socket and a remote host
+# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+# urls = ["http://localhost/status"]
+#
+# ## Duration allowed to complete HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+# ## Hosts to send ping packets to.
+# urls = ["example.org"]
+#
+# ## Method used for sending pings, can be either "exec" or "native". When set
+# ## to "exec" the system's ping command will be executed. When set to "native"
+# ## the plugin will send pings directly.
+# ##
+# ## While the default is "exec" for backwards compatibility, new deployments
+# ## are encouraged to use the "native" method for improved compatibility and
+# ## performance.
+# # method = "exec"
+#
+# ## Number of ping packets to send per interval. Corresponds to the "-c"
+# ## option of the ping command.
+# # count = 1
+#
+# ## Time to wait between sending ping packets in seconds. Operates like the
+# ## "-i" option of the ping command.
+# # ping_interval = 1.0
+#
+# ## If set, the time to wait for a ping response in seconds. Operates like
+# ## the "-W" option of the ping command.
+# # timeout = 1.0
+#
+# ## If set, the total ping deadline, in seconds. Operates like the -w option
+# ## of the ping command.
+# # deadline = 10
+#
+# ## Interface or source address to send ping from. Operates like the -I or -S
+# ## option of the ping command.
+# # interface = ""
+#
+# ## Percentiles to calculate. This only works with the native method.
+# # percentiles = [50, 95, 99]
+#
+# ## Specify the ping executable binary.
+# # binary = "ping"
+#
+# ## Arguments for ping command. When arguments is not empty, the command from
+# ## the binary option will be used and other options (ping_interval, timeout,
+# ## etc) will be ignored.
+# # arguments = ["-c", "3"]
+#
+# ## Use only IPv6 addresses when resolving a hostname.
+# # ipv6 = false
+#
+# ## Number of data bytes to be sent. Corresponds to the "-s"
+# ## option of the ping command. This only works with the native method.
+# # size = 56
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+# ## An array of sockets to gather stats about.
+# ## Specify a path to unix socket.
+# unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Read metrics from one or many PowerDNS Recursor servers
+# [[inputs.powerdns_recursor]]
+# ## Path to the Recursor control socket.
+# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+# ## Directory to create receive socket. This default is likely not writable,
+# ## please reference the full plugin documentation for a recommended setup.
+# # socket_dir = "/var/run/"
+# ## Socket permissions for the receive socket.
+# # socket_mode = "0666"
+
+
+# # Monitor process cpu and memory usage
+# [[inputs.procstat]]
+# ## PID file to monitor process
+# pid_file = "/var/run/nginx.pid"
+# ## executable name (ie, pgrep <exe>)
+# # exe = "nginx"
+# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+# # pattern = "nginx"
+# ## user as argument for pgrep (ie, pgrep -u <user>)
+# # user = "nginx"
+# ## Systemd unit name, supports globs when include_systemd_children is set to true
+# # systemd_unit = "nginx.service"
+# # include_systemd_children = false
+# ## CGroup name or path, supports globs
+# # cgroup = "systemd/system.slice/nginx.service"
+#
+# ## Windows service name
+# # win_service = ""
+#
+# ## override for process_name
+# ## This is optional; default is sourced from /proc/<pid>/status
+# # process_name = "bar"
+#
+# ## Field name prefix
+# # prefix = ""
+#
+# ## When true add the full cmdline as a tag.
+# # cmdline_tag = false
+#
+# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
+# # mode = "irix"
+#
+# ## Add the PID as a tag instead of as a field. When collecting multiple
+# ## processes with otherwise matching tags this setting should be enabled to
+# ## ensure each process has a unique identity.
+# ##
+# ## Enabling this option may result in a large number of series, especially
+# ## when processes have a short lifetime.
+# # pid_tag = false
+#
+# ## Method to use when finding process IDs. Can be one of 'pgrep', or
+# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+# ## the native finder performs the search directly in a manner dependent on the
+# ## platform. Default is 'pgrep'
+# # pid_finder = "pgrep"
+
+
+# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
+# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. +# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. +# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. 
+# urls = ["http://localhost:8080/_raindrops"]
+
+
+# # Reads metrics from RavenDB servers via the Monitoring Endpoints
+# [[inputs.ravendb]]
+# ## Node URL and port that RavenDB is listening on
+# url = "https://localhost:8080"
+#
+# ## RavenDB X509 client certificate setup
+# # tls_cert = "/etc/telegraf/raven.crt"
+# # tls_key = "/etc/telegraf/raven.key"
+#
+# ## Optional request timeout
+# ##
+# ## Timeout specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request, and the
+# ## time limit for requests made by this client
+# # timeout = "5s"
+#
+# ## List of statistics which are collected
+# # At least one is required
+# # Allowed values: server, databases, indexes, collections
+# #
+# # stats_include = ["server", "databases", "indexes", "collections"]
+#
+# ## List of db where database stats are collected
+# ## If empty, all databases are included
+# # db_stats_dbs = []
+#
+# ## List of db where index stats are collected
+# ## If empty, all indexes from all databases are included
+# # index_stats_dbs = []
+#
+# ## List of db where collection stats are collected
+# ## If empty, all collections from all databases are included
+# # collection_stats_dbs = []
+
+
+# # Read CPU, fan, power supply and voltage metrics of hardware servers through Redfish APIs
+# [[inputs.redfish]]
+# ## Server url
+# address = "https://127.0.0.1:5000"
+#
+# ## Username, Password for hardware server
+# username = "root"
+# password = "password123456"
+#
+# ## ComputerSystemId
+# computer_system_id="2M220100SL"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # # The command to run where each argument is a separate element
+# # command = ["get", "sample-key"]
+# # # The field to store the result in
+# # field = "sample-key-value"
+# # # The type of the result
+# # # Can be "string", "integer", or "float"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+# ## An array of URI to gather stats about. Specify an ip or hostname
+# ## with optional port and password, e.g.:
+# ## rethinkdb://user:auth_key@10.10.3.30:28105,
+# ## rethinkdb://10.10.3.33:18832,
+# ## 10.0.0.1:10000, etc.
+# servers = ["127.0.0.1:28015"]
+# ##
+# ## If you use a RethinkDB version > 2.3.0 with username/password authorization,
+# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
+# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+# ##
+# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
+# ## has to be named "rethinkdb".
+# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
+
+
+# # Read metrics from one or many Riak servers
+# [[inputs.riak]]
+# # Specify a list of one or more riak http servers
+# servers = ["http://localhost:8098"]
+
+
+# # Read API usage and limits for a Salesforce organisation
+# [[inputs.salesforce]]
+# ## specify your credentials
+# ##
+# username = "your_username"
+# password = "your_password"
+# ##
+# ## (optional) security token
+# # security_token = "your_security_token"
+# ##
+# ## (optional) environment type (sandbox or production)
+# ## default is: production
+# ##
+# # environment = "production"
+# ##
+# ## (optional) API version (default: "39.0")
+# ##
+# # version = "39.0"
+
+
+# # Read metrics from storage devices supporting S.M.A.R.T.
+# [[inputs.smart]]
+# ## Optionally specify the path to the smartctl executable
+# # path_smartctl = "/usr/bin/smartctl"
+#
+# ## Optionally specify the path to the nvme-cli executable
+# # path_nvme = "/usr/bin/nvme"
+#
+# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
+# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
+# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enables additional Intel specific disk info
+# # enable_extensions = ["auto-on"]
+#
+# ## On most platforms the cli utilities used require root access.
+# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
+# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
+# ## without a password.
+# # use_sudo = false
+#
+# ## Skip checking disks in this power mode. Defaults to
+# ## "standby" to not wake up disks that have stopped rotating.
+# ## See --nocheck in the man pages for smartctl.
+# ## smartctl version 5.41 and 5.42 have faulty detection of
+# ## power mode and might require changing this value to
+# ## "never" depending on your disks.
+# # nocheck = "standby"
+#
+# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+# ## information from each drive into the 'smart_attribute' measurement.
+# # attributes = false
+#
+# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
+# # excludes = [ "/dev/pass6" ]
+#
+# ## Optionally specify devices and device type, if unset
+# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
+# ## and all devices found will be included except those listed in excludes.
+# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
+#
+# ## Timeout for the cli command to complete.
+# # timeout = "30s"
+
+
+# # Retrieves SNMP values from remote agents
+# [[inputs.snmp]]
+# ## Agent addresses to retrieve values from.
+# ## format: agents = ["<scheme://><hostname>:<port>"]
+# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6.
+# ## default is udp
+# ## port: optional
+# ## example: agents = ["udp://127.0.0.1:161"]
+# ## agents = ["tcp://127.0.0.1:161"]
+# ## agents = ["udp4://v4only-snmp-agent"]
+# agents = ["udp://127.0.0.1:161"]
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## Agent host tag; the tag used to reference the source host
+# # agent_host_tag = "agent_host"
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## Add fields and tables defining the variables you wish to collect. This
+# ## example collects the system uptime and interface variables. Reference the
+# ## full plugin documentation for configuration details.
+
+
+# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
+# [[inputs.snmp_legacy]]
+# ## Use 'oids.txt' file to translate oids to names
+# ## To generate 'oids.txt' you need to run:
+# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+# ## Or if you have another MIB folder with custom MIBs
+# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+# snmptranslate_file = "/tmp/oids.txt"
+# [[inputs.snmp.host]]
+# address = "192.168.2.2:161"
+# # SNMP community
+# community = "public" # default public
+# # SNMP version (1, 2 or 3)
+# # Version 3 not supported yet
+# version = 2 # default 2
+# # SNMP response timeout
+# timeout = 2.0 # default 2.0
+# # SNMP request retries
+# retries = 2 # default 2
+# # Which get/bulk do you want to collect for this host
+# collect = ["mybulk", "sysservices", "sysdescr"]
+# # Simple list of OIDs to get, in addition to "collect"
+# get_oids = []
+#
+# [[inputs.snmp.host]]
+# address = "192.168.2.3:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# collect = ["mybulk"]
+# get_oids = [
+# "ifNumber",
+# ".1.3.6.1.2.1.1.3.0",
+# ]
+#
+# [[inputs.snmp.get]]
+# name = "ifnumber"
+# oid = "ifNumber"
+#
+# [[inputs.snmp.get]]
+# name = "interface_speed"
+# oid = "ifSpeed"
+# instance = "0"
+#
+# [[inputs.snmp.get]]
+# name = "sysuptime"
+# oid = ".1.3.6.1.2.1.1.3.0"
+# unit = "second"
+#
+# [[inputs.snmp.bulk]]
+# name = "mybulk"
+# max_repetition = 127
+# oid = ".1.3.6.1.2.1.1"
+#
+# [[inputs.snmp.bulk]]
+# name = "ifoutoctets"
+# max_repetition = 127
+# oid = "ifOutOctets"
+#
+# [[inputs.snmp.host]]
+# address = "192.168.2.13:161"
+# #address = "127.0.0.1:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
+# collect = ["sysuptime" ]
+# [[inputs.snmp.host.table]]
+# name = "iftable3"
+# include_instances = ["enp5s0", "eth1"]
+#
+# # SNMP TABLEs
+# # table without mapping nor subtables
+# [[inputs.snmp.table]]
+# name = "iftable1"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+#
+# # table without mapping but with subtables
+# [[inputs.snmp.table]]
+# name = "iftable2"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+#
+# # table with mapping but without subtables
+# [[inputs.snmp.table]]
+# name = "iftable3"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+#
+# # table with both mapping and subtables
+# [[inputs.snmp.table]]
+# name = "iftable4"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+# # sub_tables could be not "real subtables"
+# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+
+
+# # Read stats from one or more Solr servers or cores
+# [[inputs.solr]]
+# ## specify a list of one or more Solr servers
+# servers = ["http://localhost:8983"]
+#
+# ## specify a list of one or more Solr cores (default - all)
+# # cores = ["main"]
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+
+
+# # Gather timeseries from Google Cloud Platform v3 monitoring API
+# [[inputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## Include timeseries that start with the given metric type.
+# metric_type_prefix_include = [
+# "compute.googleapis.com/",
+# ]
+#
+# ## Exclude timeseries that start with the given metric type.
+# # metric_type_prefix_exclude = []
+#
+# ## Many metrics are updated once per minute; it is recommended to override
+# ## the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies, it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to raw
+# ## bucket counts, if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported.
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= <resource_labels> {AND <metric_labels>}
+# ## resource_labels ::= <resource_label> {OR <resource_label>}
+# ## metric_labels ::= <metric_label> {OR <metric_label>}
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels.<key> = <value>
+
+
+# # Get synproxy counter statistics from procfs
+# [[inputs.synproxy]]
+# # no configuration
+
+
+# # Reads metrics from a Teamspeak 3 Server via ServerQuery
+# [[inputs.teamspeak]]
+# ## Server address for Teamspeak 3 ServerQuery
+# # server = "127.0.0.1:10011"
+# ## Username for ServerQuery
+# username = "serverqueryuser"
+# ## Password for ServerQuery
+# password = "secret"
+# ## Array of virtual servers
+# # virtual_servers = [1]
+
+
+# # Read metrics about temperature
+# [[inputs.temp]]
+# # no configuration
+
+
+# # Read Tengine's basic status information (ngx_http_reqstat_module)
+# [[inputs.tengine]]
+# # An array of Tengine reqstat module URIs to gather stats.
+# urls = ["http://127.0.0.1/us"]
+#
+# # HTTP response timeout (default: 5s)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.cer"
+# # tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Gather metrics from the Tomcat server status page.
+# [[inputs.tomcat]]
+# ## URL of the Tomcat server status
+# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
+#
+# ## HTTP Basic Auth Credentials
+# # username = "tomcat"
+# # password = "s3cret"
+#
+# ## Request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Inserts sine and cosine waves for demonstration purposes
+# [[inputs.trig]]
+# ## Set the amplitude
+# amplitude = 10.0
+
+
+# # Read Twemproxy stats data
+# [[inputs.twemproxy]]
+# ## Twemproxy stats address and port (no scheme)
+# addr = "localhost:22222"
+# ## Monitor pool name
+# pools = ["redis_pool", "mc_pool"]
+
+
+# # A plugin to collect stats from the Unbound DNS resolver
+# [[inputs.unbound]]
+# ## Address of server to connect to, read from unbound conf by default, optionally with ':port'
+# ## Will look up the IP if given a hostname
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the unbound-control binary can be overridden with:
+# # binary = "/usr/sbin/unbound-control"
+#
+# ## The default location of the unbound config file can be overridden with:
+# # config_file = "/etc/unbound/unbound.conf"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+#
+# ## When set to true, thread metrics are tagged with the thread id.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# thread_as_tag = false
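+
+# # NOTE: editor's illustrative sketch, not part of the generated sample:
+# # running unbound-control via sudo from a non-default location, using only
+# # the options documented above (the binary path is hypothetical).
+# # [[inputs.unbound]]
+# # server = "127.0.0.1:8953"
+# # use_sudo = true
+# # binary = "/opt/unbound/sbin/unbound-control"
+# # thread_as_tag = true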
+
+
+# # Read uWSGI metrics.
+# [[inputs.uwsgi]]
+# ## List of uWSGI Stats server URLs. Each URL must match the pattern:
+# ## scheme://address[:port]
+# ##
+# ## For example:
+# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+# servers = ["tcp://127.0.0.1:1717"]
+#
+# ## General connection timeout
+# # timeout = "5s"
+
+
+# # Input plugin to collect Windows Event Log messages
+# [[inputs.win_eventlog]]
+# ## Telegraf should have Administrator permissions to subscribe to some Windows Events channels
+# ## (System log, for example)
+#
+# ## LCID (Locale ID) for event rendering
+# ## 1033 to force English language
+# ## 0 to use default Windows locale
+# # locale = 0
+#
+# ## Name of eventlog, used only if xpath_query is empty
+# ## Example: "Application"
+# # eventlog_name = ""
+#
+# ## xpath_query can be defined in short form like "Event/System[EventID=999]"
+# ## or you can form an XML Query. Refer to the Consuming Events article:
+# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events
+# ## XML query is the recommended form, because it is most flexible
+# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer
+# ## and then copying resulting XML here
+# xpath_query = '''
+# <QueryList>
+# <Query Id="0" Path="Security">
+# <Select Path="Security">*</Select>
+# <Suppress Path="Security">*[System[( (EventID &gt;= 5152 and EventID &lt;= 5158) or EventID=5379 or EventID=4672)]]</Suppress>
+# </Query>
+# <Query Id="1" Path="Application">
+# <Select Path="Application">*[System[(Level &lt; 4)]]</Select>
+# </Query>
+# <Query Id="2" Path="Windows PowerShell">
+# <Select Path="Windows PowerShell">*[System[(Level &lt; 4)]]</Select>
+# </Query>
+# <Query Id="3" Path="System">
+# <Select Path="System">*</Select>
+# </Query>
+# <Query Id="4" Path="Setup">
+# <Select Path="Setup">*</Select>
+# </Query>
+# </QueryList>
+# '''
+#
+# ## System field names:
+# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated",
+# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName",
+# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText"
+#
+# ## In addition to System, Data fields can be unrolled from additional XML nodes in event.
+# ## Human-readable representation of those nodes is formatted into event Message field,
+# ## but XML is more machine-parsable
+#
+# # Process UserData XML to fields, if this node exists in Event XML
+# process_userdata = true
+#
+# # Process EventData XML to fields, if this node exists in Event XML
+# process_eventdata = true
+#
+# ## Separator character to use for unrolled XML Data field names
+# separator = "_"
+#
+# ## Get only the first line of the Message field. For most events the first line is usually more than enough
+# only_first_line_of_message = true
+#
+# ## Parse timestamp from TimeCreated.SystemTime event field.
+# ## Will default to current time of telegraf processing on parsing error or if set to false
+# timestamp_from_event = true
+#
+# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText")
+# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"]
+#
+# ## Default list of fields to send. All fields are sent by default. Globbing supported
+# event_fields = ["*"]
+#
+# ## Fields to exclude. Also applied to data fields. Globbing supported
+# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"]
+#
+# ## Skip those tags or fields if their value is empty or equals zero. Globbing supported
+# exclude_empty = ["*ActivityID", "UserID"]
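+
+# # NOTE: editor's illustrative sketch, not part of the generated sample: as
+# # documented above, a single channel can be selected with eventlog_name
+# # instead of an XML query by leaving xpath_query empty.
+# # [[inputs.win_eventlog]]
+# # eventlog_name = "Application"
+# # xpath_query = ""
+# # only_first_line_of_message = true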
+
+
+# # Input plugin to collect Performance Counters on Windows operating systems
+# [[inputs.win_perf_counters]]
+# ## By default this plugin returns basic CPU and Disk statistics.
+# ## See the README file for more examples.
+# ## Uncomment examples below or write your own as you see fit. If the system
+# ## being polled for data does not have the Object at startup of the Telegraf
+# ## agent, it will not be gathered.
+# ## Settings:
+# # PrintValid = false # Print All matching performance counters
+# # Whether to request a timestamp along with the PerfCounter data, or just use the current time
+# # UsePerfCounterTime=true
+# # If UseWildcardsExpansion is set to true, wildcards (partial wildcards in instance names and wildcards in counter names) in configured counter paths will be expanded,
+# # and on localized Windows, counter paths will also be localized. It also returns instance indexes in instance names.
+# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
+# #UseWildcardsExpansion = false
+# # Period after which counters will be reread from configuration and wildcards in counter paths expanded
+# CountersRefreshInterval="1m"
+#
+# [[inputs.win_perf_counters.object]]
+# # Processor usage; an alternative to the native plugin that reports per core.
+# ObjectName = "Processor"
+# Instances = ["*"]
+# Counters = [
+# "% Idle Time",
+# "% Interrupt Time",
+# "% Privileged Time",
+# "% User Time",
+# "% Processor Time",
+# "% DPC Time",
+# ]
+# Measurement = "win_cpu"
+# # Set to true to include _Total instance when querying for all (*).
+# # IncludeTotal=false
+# # Print out when the performance counter is missing from object, counter or instance.
+# # WarnOnMissing = false
+#
+# [[inputs.win_perf_counters.object]]
+# # Disk times and queues
+# ObjectName = "LogicalDisk"
+# Instances = ["*"]
+# Counters = [
+# "% Idle Time",
+# "% Disk Time",
+# "% Disk Read Time",
+# "% Disk Write Time",
+# "% User Time",
+# "% Free Space",
+# "Current Disk Queue Length",
+# "Free Megabytes",
+# ]
+# Measurement = "win_disk"
+#
+# [[inputs.win_perf_counters.object]]
+# ObjectName = "PhysicalDisk"
+# Instances = ["*"]
+# Counters = [
+# "Disk Read Bytes/sec",
+# "Disk Write Bytes/sec",
+# "Current Disk Queue Length",
+# "Disk Reads/sec",
+# "Disk Writes/sec",
+# "% Disk Time",
+# "% Disk Read Time",
+# "% Disk Write Time",
+# ]
+# Measurement = "win_diskio"
+#
+# [[inputs.win_perf_counters.object]]
+# ObjectName = "Network Interface"
+# Instances = ["*"]
+# Counters = [
+# "Bytes Received/sec",
+# "Bytes Sent/sec",
+# "Packets Received/sec",
+# "Packets Sent/sec",
+# "Packets Received Discarded",
+# "Packets Outbound Discarded",
+# "Packets Received Errors",
+# "Packets Outbound Errors",
+# ]
+# Measurement = "win_net"
+#
+#
+# [[inputs.win_perf_counters.object]]
+# ObjectName = "System"
+# Counters = [
+# "Context Switches/sec",
+# "System Calls/sec",
+# "Processor Queue Length",
+# "System Up Time",
+# ]
+# Instances = ["------"]
+# Measurement = "win_system"
+#
+# [[inputs.win_perf_counters.object]]
+# # Example counterPath where the Instance portion must be removed to get data back,
+# # such as from the Memory object.
+# ObjectName = "Memory"
+# Counters = [
+# "Available Bytes",
+# "Cache Faults/sec",
+# "Demand Zero Faults/sec",
+# "Page Faults/sec",
+# "Pages/sec",
+# "Transition Faults/sec",
+# "Pool Nonpaged Bytes",
+# "Pool Paged Bytes",
+# "Standby Cache Reserve Bytes",
+# "Standby Cache Normal Priority Bytes",
+# "Standby Cache Core Bytes",
+# ]
+# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath.
+# Measurement = "win_mem" +# +# [[inputs.win_perf_counters.object]] +# # Example query where the Instance portion must be removed to get data back, +# # such as from the Paging File object. +# ObjectName = "Paging File" +# Counters = [ +# "% Usage", +# ] +# Instances = ["_Total"] +# Measurement = "win_swap" + + +# # Input plugin to report Windows services info. +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# ## By default, don't gather zpool stats +# # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". 
+# # service_type = "tunnel"
+#
+# ## Address of the KNX-IP interface.
+# service_address = "localhost:3671"
+#
+# ## Measurement definition(s)
+# # [[inputs.knx_listener.measurement]]
+# # ## Name of the measurement
+# # name = "temperature"
+# # ## Datapoint-Type (DPT) of the KNX messages
+# # dpt = "9.001"
+# # ## List of Group-Addresses (GAs) assigned to the measurement
+# # addresses = ["5/5/1"]
+#
+# # [[inputs.knx_listener.measurement]]
+# # name = "illumination"
+# # dpt = "9.004"
+# # addresses = ["5/5/3"]
+
+
+# # Pull Metric Statistics from Aliyun CMS
+# [[inputs.aliyuncms]]
+# ## Aliyun Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Ram RoleArn credential
+# ## 2) AccessKey STS token credential
+# ## 3) AccessKey credential
+# ## 4) Ecs Ram Role credential
+# ## 5) RSA keypair credential
+# ## 6) Environment variables credential
+# ## 7) Instance metadata credential
+#
+# # access_key_id = ""
+# # access_key_secret = ""
+# # access_key_sts_token = ""
+# # role_arn = ""
+# # role_session_name = ""
+# # private_key = ""
+# # public_key_id = ""
+# # role_name = ""
+#
+# ## Specify the ali cloud region list to be queried for metrics and objects discovery
+# ## If not set, all supported regions (see below) are covered, which can put a significant load on the API,
+# ## so the recommendation is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
+# ## Default supported regions are:
+# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
+# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
+# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
+# ##
+# ## From a discovery perspective this sets the scope for object discovery; the discovered info can be used to enrich
+# ## the metrics with object attributes/tags. Discovery is not supported for all projects (if not supported,
+# ## it will be reported on start - for example for the 'acs_cdn' project:
+# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
+# ## Currently, discovery is supported for the following projects:
+# ## - acs_ecs_dashboard
+# ## - acs_rds_dashboard
+# ## - acs_slb_dashboard
+# ## - acs_vpc_eip
+# regions = ["cn-hongkong"]
+#
+# # The minimum period for AliyunCMS metrics is 1 minute (60s). However, not all
+# # metrics are made available at the 1 minute period. Some are collected at
+# # 3 minute, 5 minute, or larger intervals.
+# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Aliyun OpenAPI
+# # and will not be collected by Telegraf.
+# #
+# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via AliyunCMS API)
+# delay = "1m"
+#
+# ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Metric Statistic Project (required)
+# project = "acs_slb_dashboard"
+#
+# ## Maximum requests per second, default value is 200
+# ratelimit = 200
+#
+# ## How often the discovery API call is executed (default 1m)
+# #discovery_interval = "1m"
+#
+# ## Metrics to Pull (Required)
+# [[inputs.aliyuncms.metrics]]
+# ## Metrics names to be requested,
+# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# names = ["InstanceActiveConnection", "InstanceNewConnection"]
+#
+# ## Dimension filters for Metric (these are optional).
+# ## This allows you to get additional metric dimensions. If a dimension is not specified it can be returned or
+# ## the data can be aggregated - it depends on the particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled)
+# ## Values specified here would be added into the list of discovered objects.
+# ## You can specify either a single dimension:
+# #dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is <tag_name>:<JMES query path>
+# ## To figure out which fields are available, consult the Describe API per project.
+# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# #tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+# ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId.
+#
+# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without discovery
+# ## data would be emitted, otherwise dropped. This can be of help when debugging dimension filters, or with partial coverage
+# ## of discovery scope vs monitoring scope
+# #allow_dps_without_discovery = false
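+
+# # NOTE: editor's illustrative worked example, not part of the generated
+# # sample: period must be a multiple of 60s and interval should be a
+# # multiple of period, so a 1m period pulled every 5m with a 1m
+# # availability delay looks like this (metric name from the example above).
+# # [[inputs.aliyuncms]]
+# # regions = ["cn-hongkong"]
+# # project = "acs_slb_dashboard"
+# # period = "1m"
+# # delay = "1m"
+# # interval = "5m"
+# # [[inputs.aliyuncms.metrics]]
+# # names = ["InstanceActiveConnection"]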
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Broker to consume from.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Name of the exchange to declare. If unset, no exchange will be declared.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## AMQP queue name.
+# queue = "telegraf"
+#
+# ## AMQP queue durability can be "transient" or "durable".
+# queue_durability = "durable"
+#
+# ## If true, queue will be passively declared.
+# # queue_passive = false
+#
+# ## A binding between the exchange and queue using this binding key is
+# ## created. If unset, no binding is created.
+# binding_key = "#"
+#
+# ## Maximum number of messages server should give to the worker.
+# # prefetch_count = 50
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" or to
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
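+
+# # NOTE: editor's illustrative worked example, not part of the generated
+# # sample: following the sizing rule documented above, with 10 metrics per
+# # queue message and an output metric_batch_size of 1000, setting
+# # max_undelivered_messages to 100 (100 messages x 10 metrics) lets a full
+# # batch accumulate and triggers the write immediately.
+# # [[inputs.amqp_consumer]]
+# # brokers = ["amqp://localhost:5672/influxdb"]
+# # queue = "telegraf"
+# # max_undelivered_messages = 100
+# # data_format = "influx"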
+
+
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
+# ## jolokia2 plugin instead.
+# ##
+# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# context = "/jolokia/read"
+# ## List of cassandra servers exposing jolokia read service
+# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+# ## List of metrics collected on above servers
+# ## Each metric consists of a jmx path.
+# ## This will collect all heap memory usage metrics from the jvm and
+# ## ReadLatency metrics for all keyspaces and tables.
+# ## "type=Table" in the query works with Cassandra 3.0. Older versions might
+# ## need to use "type=ColumnFamily"
+# metrics = [
+# "/java.lang:type=Memory/HeapMemoryUsage",
+# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+# ]
+
+
+# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
+# [[inputs.cisco_telemetry_mdt]]
+# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+# ## using the grpc transport.
+# transport = "grpc"
+#
+# ## Address and port to host telemetry listener
+# service_address = ":57000"
+#
+# ## Enable TLS; grpc transport only.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Enable TLS client authentication and define allowed CA certificates; grpc
+# ## transport only.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+#
+# ## Define aliases to map telemetry encoding paths to simple measurement names
+# [inputs.cisco_telemetry_mdt.aliases]
+# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+# ## Define property transformation; please refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details.
+# [inputs.cisco_telemetry_mdt.dmes]
+# ModTs = "ignore"
+# CreateTs = "ignore"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: username = "default"
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics are scraped via the HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true" the plugin tries to connect to all servers available in the cluster
+# ## using the same "user:password" described in the "user" and "password" parameters
+# ## and gets this server hostname list from the "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present then a "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <remote_servers>
+# ## <my-own-cluster>
+# ## <shard>
+# ## <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ## <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ## </shard>
+# ## <shard>
+# ## <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ## <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ## </shard>
+# ## </my-own-cluster>
+# ## </remote_servers>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present then a "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
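+
+# # NOTE: editor's illustrative sketch, not part of the generated sample:
+# # auto-discovery restricted to a single cluster, using the cluster name
+# # from the remote.xml example above.
+# # [[inputs.clickhouse]]
+# # username = "default"
+# # servers = ["http://127.0.0.1:8123"]
+# # auto_discovery = true
+# # cluster_include = ["my-own-cluster"]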
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional. Maximum byte length of a message to consume.
+# ## Larger messages are dropped with an error. If less than 0 or unspecified,
+# ## treated as no limit.
+# # max_message_len = 1000000
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## The following are optional Subscription ReceiveSettings in PubSub.
+# ## Read more about these values:
+# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
+#
+# ## Optional. Maximum number of seconds for which a PubSub subscription
+# ## should auto-extend the PubSub ACK deadline for each message. If less than
+# ## 0, auto-extension is disabled.
+# # max_extension = 0
+#
+# ## Optional. Maximum number of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_messages = 0
+#
+# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_bytes = 0
+#
+# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
+# ## to pull messages from PubSub concurrently. This limit applies to each
+# ## subscription separately and is treated as the PubSub default if less than
+# ## 1. Note this setting does not limit the number of messages that can be
+# ## processed concurrently (use "max_outstanding_messages" instead).
+# # max_receiver_go_routines = 0
+#
+# ## Optional. If true, Telegraf will attempt to base64 decode the
+# ## PubSub message data before parsing
+# # base64_data = false
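+
+# # NOTE: editor's illustrative sketch, not part of the generated sample: an
+# # explicit service-account credentials file plus base64-encoded payloads,
+# # using only the options documented above (the file path is hypothetical).
+# # [[inputs.cloud_pubsub]]
+# # project = "my-project"
+# # subscription = "my-subscription"
+# # data_format = "influx"
+# # credentials_file = "path/to/my/creds.json"
+# # base64_data = true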
+
+
+# # Google Cloud Pub/Sub Push HTTP listener
+# [[inputs.cloud_pubsub_push]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Application secret to verify messages originate from Cloud Pub/Sub
+# # token = ""
+#
+# ## Path to listen to.
+# # path = "/"
+#
+# ## Maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## Maximum duration before timing out write of the response. This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from.
+# directory = ""
+# #
+# ## The directory to move finished files to.
+# finished_directory = ""
+# #
+# ## The directory to move files to upon file error.
+# ## If not provided, erroring files will stay in the monitored directory.
+# # error_directory = ""
+# #
+# ## The amount of time a file is allowed to sit in the directory before it is picked up.
+# ## This time can generally be low, but if you choose to have a very large file written to the directory and it's potentially slow,
+# ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+# # directory_duration_threshold = "50ms"
+# #
+# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+# # files_to_monitor = ["^.*\.csv"]
+# #
+# ## A list of files to ignore, if necessary. Supports regex.
+# # files_to_ignore = [".DS_Store"]
+# #
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set to the size of the output's metric_buffer_limit.
+# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+# # max_buffered_metrics = 10000
+# #
+# ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
+# # file_queue_size = 100000
+# #
+# ## The data format to be read from the files.
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. +# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). 
+# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
+# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
+# # retention_policy_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+
+
+# # Accept metrics over InfluxDB 2.x HTTP API
+# [[inputs.influxdb_v2_listener]]
+# ## Address and port to host InfluxDB listener on
+# ## (Double check the port. Could be 9999 if using OSS Beta)
+# service_address = ":8086"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# # max_body_size = "32MiB"
+#
+# ## Optional tag to determine the bucket.
+# ## If the write has a bucket in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # bucket_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional token to accept for HTTP authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # token = "some-long-shared-secret-token"
+
+
+# # Read JTI OpenConfig Telemetry from listed sensors
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are required if the device
+# ## expects authentication. Client ID must be unique when connecting from multiple instances
+# ## of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe for
+# ## An identifier for each sensor can be provided in the path by separating it with a space;
+# ## else the sensor path will be used as the identifier
+# ## When an identifier is used, we can provide a list of space separated sensors.
+# ## A single subscription will be created with all these sensors and data will
+# ## be saved to a measurement with this identifier name
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## We allow specifying a sensor group level reporting rate. To do this, specify the
+# ## reporting rate in Duration at the beginning of the sensor paths / collection
+# ## name. For entries without a reporting rate, we use the configured sample frequency
+# sensors = [
+# "1000ms customReporting /interfaces /lldp",
+# "2000ms collection /components",
+# "/interfaces",
+# ]
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## SASL authentication credentials. These settings should typically be used +# ## with TLS encryption enabled +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. 
The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## Read files that currently exist from the beginning. Files that are created +# ## while telegraf is running (and that match the "files" globs) will always +# ## be read from the beginning. +# from_beginning = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Parse logstash-style "grok" patterns: +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. +# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# ## Broker URLs for the MQTT server or cluster. 
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a separate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
+# # qos = 0
+#
+# ## Connection timeout for initial connection in seconds
+# # connection_timeout = "30s"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false
+#
+# ## If unset, a random client ID will be generated.
+# # client_id = ""
+#
+# ## Username and password to connect to the MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
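The topics list above uses the MQTT wildcard syntax: `+` matches exactly one topic level, `#` matches that level and everything below it. A standalone sketch of those matching rules; the plugin itself delegates subscription matching to the MQTT client library:

```go
package main

import (
	"fmt"
	"strings"
)

// matchTopic reports whether an MQTT topic matches a subscription
// filter, honoring the single-level (+) and multi-level (#) wildcards.
func matchTopic(filter, topic string) bool {
	f := strings.Split(filter, "/")
	t := strings.Split(topic, "/")
	for i, part := range f {
		if part == "#" { // matches this level and everything below
			return true
		}
		if i >= len(t) {
			return false
		}
		if part != "+" && part != t[i] { // "+" matches exactly one level
			return false
		}
	}
	return len(f) == len(t)
}

func main() {
	fmt.Println(matchTopic("telegraf/+/mem", "telegraf/host01/mem")) // true
	fmt.Println(matchTopic("sensors/#", "sensors/room1/temp"))       // true
	fmt.Println(matchTopic("telegraf/+/mem", "telegraf/host01/cpu")) // false
}
```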
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+# ## urls of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## subject(s) to consume
+# subjects = ["telegraf"]
+#
+# ## name a queue group
+# queue_group = "telegraf_consumers"
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the limits for pending msgs and bytes for each subscription
+# ## These shouldn't need to be adjusted except in very high throughput scenarios
+# # pending_message_limit = 65536
+# # pending_bytes_limit = 67108864
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read NSQ topic for metrics.
+# [[inputs.nsq_consumer]]
+# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
+# # server = "localhost:4150"
+#
+# ## An array representing the NSQD TCP Endpoints
+# nsqd = ["localhost:4150"]
+#
+# ## An array representing the NSQLookupd HTTP Endpoints
+# nsqlookupd = ["localhost:4161"]
+# topic = "telegraf"
+# channel = "consumer"
+# max_in_flight = 100
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
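The max_undelivered_messages guidance repeated by the consumer plugins above is simple arithmetic; a worked version with the numbers from the comments (both inputs are assumptions about your actual workload):

```go
package main

import "fmt"

func main() {
	metricsPerMessage := 10 // metrics carried by each queue message (assumed)
	metricBatchSize := 1000 // the output's metric_batch_size

	// Size the unacknowledged window so one full output batch is always
	// available, triggering an immediate write instead of waiting for
	// the next flush_interval.
	maxUndelivered := metricBatchSize / metricsPerMessage
	fmt.Println(maxUndelivered) // 100
}
```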
+
+
+# # Receive OpenTelemetry traces, metrics, and logs over gRPC
+# [[inputs.opentelemetry]]
+# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
+# ## address:port
+# # service_address = "0.0.0.0:4317"
+#
+# ## Override the default (5s) new connection timeout
+# # timeout = "5s"
+#
+# ## Override the default (prometheus-v1) metrics schema.
+# ## Supports: "prometheus-v1", "prometheus-v2"
+# ## For more information about the alternatives, read the Prometheus input
+# ## plugin notes.
+# # metrics_schema = "prometheus-v1"
+#
+# ## Optional TLS Config.
+# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
+# ##
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Add service certificate and key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # Read metrics from one or many pgbouncer servers
+# [[inputs.pgbouncer]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# address = "host=localhost user=pgbouncer sslmode=disable"
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# ##
+# address = "host=localhost user=postgres sslmode=disable"
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to explicitly ignore. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'databases' option.
+# # ignored_databases = ["postgres", "template0", "template1"]
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
+# # databases = ["app_production", "testing"]
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# #
+# ## All connection parameters are optional.
+# #
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# #
+# address = "host=localhost user=postgres sslmode=disable"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered.
+# ## databases = ["app_production", "testing"]
+# #
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+# #
+# ## Define the toml config where the sql queries are stored
+# ## New queries can be added, if the withdbname is set to true and there is no
+# ## databases defined in the 'databases field', the sql query is ended by a
+# ## 'is not null' in order to make the query succeed.
+# ## Example :
+# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+# ## withdbname was true.
+# ## Be careful: if withdbname is set to false you don't have to define the
+# ## where clause (aka with the dbname).
+# ##
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
+# ##
+# ## The script option can be used to specify the .sql file path.
+# ## If the script and sqlquery options are specified at the same time,
+# ## sqlquery will be used.
+# ##
+# ## The tagvalue field is used to define custom tags (separated by commas).
+# ## The query is expected to return columns which match the names of the
+# ## defined tags. The values in these columns must be of a string-type,
+# ## a number-type or a blob-type.
+# ##
+# ## The timestamp field is used to override the data points timestamp value. By
+# ## default, all rows are inserted with the current time. By setting a timestamp
+# ## column, the row will be inserted with that column's value.
+# ##
+# ## Structure :
+# ## [[inputs.postgresql_extensible.query]]
+# ## sqlquery string
+# ## version string
+# ## withdbname boolean
+# ## tagvalue string (comma separated)
+# ## measurement string
+# ## timestamp string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database"
+# version=901
+# withdbname=false
+# tagvalue=""
+# measurement=""
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# version=901
+# withdbname=false
+# tagvalue="postgresql.stats"
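A sketch of the withdbname rule described above, i.e. how the trailing datname predicate is completed with an IN list, or with "is not null" when no databases are configured. This illustrates the rule only; it is not the plugin's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// expandQuery completes a query that ends in a bare "datname"
// predicate, mirroring the withdbname behavior documented above.
func expandQuery(sqlquery string, withdbname bool, databases []string) string {
	if !withdbname || !strings.HasSuffix(sqlquery, "datname") {
		return sqlquery
	}
	if len(databases) == 0 {
		return sqlquery + " is not null"
	}
	quoted := make([]string, len(databases))
	for i, db := range databases {
		quoted[i] = "'" + db + "'"
	}
	return sqlquery + " IN (" + strings.Join(quoted, ", ") + ")"
}

func main() {
	q := "SELECT * FROM pg_stat_database where datname"
	fmt.Println(expandQuery(q, true, []string{"postgres", "pgbench"}))
	fmt.Println(expandQuery(q, true, nil))
}
```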
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## Metric version controls the mapping from Prometheus metrics into
+# ## Telegraf metrics. When using the prometheus_client output, use the same
+# ## value in both plugins to ensure metrics are round-tripped without
+# ## modification.
+# ##
+# ## example: metric_version = 1;
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Url tag name (tag containing scraped url; optional, default is "url")
+# # url_tag = "url"
+#
+# ## Whether the timestamp of the scraped metrics will be ignored.
+# ## If set to true, the gather time will be used.
+# # ignore_timestamp = false
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+# ## - prometheus.io/port: If port is not 9102 use this annotation
+# # monitor_kubernetes_pods = true
+# ## Get the list of pods to scrape with either the scope of
+# ## - cluster: the kubernetes watch api (default, no need to specify)
+# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+# # pod_scrape_scope = "cluster"
+# ## Only for node scrape scope: node IP of the node that telegraf is running on.
+# ## Either this config or the environment variable NODE_IP must be set.
+# # node_ip = "10.180.1.1"
+# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+# ## Default is 60 seconds.
+# # pod_scrape_interval = 60
+# ## Restricts Kubernetes monitoring to a single namespace
+# ## ex: monitor_kubernetes_pods_namespace = "default"
+# # monitor_kubernetes_pods_namespace = ""
+# # label selector to target pods which have the label
+# # kubernetes_label_selector = "env=dev,app=nginx"
+# # field selector to target pods
+# # eg. To scrape pods on a specific node
+# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+#
+# ## Scrape Services available in Consul Catalog
+# # [inputs.prometheus.consul]
+# # enabled = true
+# # agent = "http://localhost:8500"
+# # query_interval = "5m"
+#
+# # [[inputs.prometheus.consul.query]]
+# # name = "a service name"
+# # tag = "a service tag"
+# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+# # [inputs.prometheus.consul.query.tags]
+# # host = "{{.Node}}"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
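The consul query url above is a Go text/template evaluated once per discovered service. This standalone sketch renders it against a hand-built stand-in for a consul catalog entry; the field names mirror the template, but the struct and its values are assumptions for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// service stands in for the consul catalog entry the template sees.
type service struct {
	ServiceAddress string
	Address        string
	ServicePort    int
	ServiceMeta    map[string]string
	Node           string
}

func main() {
	tmpl := template.Must(template.New("url").Parse(
		`http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}` +
			`:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}`))

	var buf bytes.Buffer
	svc := service{Address: "10.0.0.5", ServicePort: 9100, ServiceMeta: map[string]string{}}
	if err := tmpl.Execute(&buf, svc); err != nil {
		panic(err)
	}
	// ServiceAddress and metrics_path are unset, so the fallbacks apply.
	fmt.Println(buf.String()) // http://10.0.0.5:9100/metrics
}
```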
+
+
+# # Riemann protobuf listener.
+# [[inputs.riemann_listener]]
+# ## URL to listen on.
+# ## Default is "tcp://:5555"
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+#
+# ## Maximum number of concurrent connections.
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+# ## Read timeout.
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+# ## Optional TLS configuration.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# # read_buffer_size = "64KiB"
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+
+
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+# ## Address to listen for sFlow packets.
+# ## example: service_address = "udp://:6343"
+# ## service_address = "udp4://:6343"
+# ## service_address = "udp6://:6343"
+# service_address = "udp://:6343"
+#
+# ## Set the size of the operating system's receive buffer.
+# ## example: read_buffer_size = "64KiB"
+# # read_buffer_size = ""
+
+
+# # Receive SNMP traps
+# [[inputs.snmp_trap]]
+# ## Transport, local address, and port to listen on. Transport must
+# ## be "udp://". Omit local address to listen on all interfaces.
+# ## example: "udp://127.0.0.1:1234"
+# ##
+# ## Special permissions may be required to listen on a port less than
+# ## 1024. See README.md for details.
+# ##
+# # service_address = "udp://:162"
+# ##
+# ## Path to mib files
+# # path = ["/usr/share/snmp/mibs"]
+# ##
+# ## Timeout running snmptranslate command
+# # timeout = "5s"
+# ## Snmp version, defaults to 2c
+# # version = "2c"
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+
+
+# # Generic socket listener capable of handling multiple socket types.
+# [[inputs.socket_listener]]
+# ## URL to listen on
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+# # service_address = "udp://:8094"
+# # service_address = "udp4://:8094"
+# # service_address = "udp6://:8094"
+# # service_address = "unix:///tmp/telegraf.sock"
+# # service_address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Change the file mode bits on unix sockets. These permissions may not be
+# ## respected by some platforms; to safely restrict write permissions it is best
+# ## to place the socket into a directory that has previously been created
+# ## with the desired permissions.
+# ## ex: socket_mode = "777"
+# # socket_mode = ""
+#
+# ## Maximum number of concurrent connections.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+#
+# ## Read timeout.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+#
+# ## Optional TLS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# ## For stream sockets, once the buffer fills up, the sender will start backing up.
+# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+# ## Defaults to the OS default.
+# # read_buffer_size = "64KiB"
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+#
+# ## Content encoding for message payloads; can be set to "gzip", or to
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+
+
+# # Read metrics from SQL queries
+# [[inputs.sql]]
+# ## Database Driver
+# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
+# ## a list of supported drivers.
+# driver = "mysql"
+#
+# ## Data source name for connecting
+# ## The syntax and supported options depend on the selected driver.
+# dsn = "username:password@mysqlserver:3307/dbname?param=value" +# +# ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. +# # timeout = "5s" +# +# ## Connection time limits +# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections +# ## will not be closed automatically. If you specify a positive time, the connections will be closed after +# ## idleing or existing for at least that amount of time, respectively. +# # connection_max_idle_time = "0s" +# # connection_max_life_time = "0s" +# +# ## Connection count limits +# ## By default the number of open connections is not limited and the number of maximum idle connections +# ## will be inferred from the number of queries specified. If you specify a positive number for any of the +# ## two options, connections will be closed when reaching the specified limit. The number of idle connections +# ## will be clipped to the maximum number of connections limit if any. +# # connection_max_open = 0 +# # connection_max_idle = auto +# +# [[inputs.sql.query]] +# ## Query to perform on the server +# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" +# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. +# ## Only one of 'query' and 'query_script' can be specified! +# # query_script = "/path/to/sql/script.sql" +# +# ## Name of the measurement +# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. +# # measurement = "sql" +# +# ## Column name containing the name of the measurement +# ## If given, this will take precedence over the 'measurement' setting. In case a query result +# ## does not contain the specified column, we fall-back to the 'measurement' setting. +# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If ommited, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_col' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. 
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## for Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, tls connections can be created like so:
+# ## "encrypt=true;certificate=;hostNameInCertificate="
+# servers = [
+# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# ]
+#
+# ## Authentication method
+# ## valid methods: "connection_string", "AAD"
+# # auth_method = "connection_string"
+#
+# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer"
+#
+# ## Queries enabled by default for database_type = "AzureSQLDB" are -
+# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+#
+# # database_type = "AzureSQLDB"
+#
+# ## A list of queries to include. If not specified, all the above listed queries are used.
+# # include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# # exclude_query = []
+#
+# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+#
+# # database_type = "AzureSQLManagedInstance"
+#
+# # include_query = []
+#
+# # exclude_query = []
+#
+# ## Queries enabled by default for database_type = "SQLServer" are -
+# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu
+#
+# database_type = "SQLServer"
+#
+# include_query = []
+#
+# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+#
+# ## The following are old config settings; use them only if you are using the earlier flavor of queries.
+# ## It is recommended to use the new mechanism of identifying the database_type and thereby its corresponding queries.
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# # query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+
+
+# # Statsd UDP/TCP Server
+# [[inputs.statsd]]
+# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+# protocol = "udp"
+#
+# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
+# max_tcp_connections = 250
+#
+# ## Enable TCP keep alive probes (default=false)
+# tcp_keep_alive = false
+#
+# ## Specifies the keep-alive period for an active network connection.
+# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+# ## Defaults to the OS configuration.
+# # tcp_keep_alive_period = "2h"
+#
+# ## Address and port to host UDP listener on
+# service_address = ":8125"
+#
+# ## The following configuration options control when telegraf clears its cache
+# ## of previous values. If set to false, then telegraf will only clear its
+# ## cache when the daemon is restarted.
+# ## Reset gauges every interval (default=true)
+# delete_gauges = true
+# ## Reset counters every interval (default=true)
+# delete_counters = true
+# ## Reset sets every interval (default=true)
+# delete_sets = true
+# ## Reset timings & histograms every interval (default=true)
+# delete_timings = true
+#
+# ## Percentiles to calculate for timing & histogram stats
+# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
+#
+# ## separator to use between elements of a statsd metric
+# metric_separator = "_"
+#
+# ## Parses tags in the datadog statsd format
+# ## http://docs.datadoghq.com/guides/dogstatsd/
+# parse_data_dog_tags = false
+#
+# ## Parses datadog extensions to the statsd format
+# datadog_extensions = false
+#
+# ## Parses distribution metrics as specified in the datadog statsd format
+# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
+# datadog_distributions = false
+#
+# ## Statsd data translation templates, more info can be read here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
+# # templates = [
+# # "cpu.* measurement*"
+# # ]
+#
+# ## Number of UDP messages allowed to queue up, once filled,
+# ## the statsd server will start dropping packets
+# allowed_pending_messages = 10000
+#
+# ## Number of timing/histogram values to track per-measurement in the
+# ## calculation of percentiles. Raising this limit increases the accuracy
+# ## of percentiles but also increases the memory usage and cpu time.
+# percentile_limit = 1000
+#
+# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
+# # max_ttl = "1000h"
+
+
+# # Suricata stats and alerts plugin
+# [[inputs.suricata]]
+# ## Data sink for Suricata stats and alerts logs
+# # This is expected to be a filename of a
+# # unix socket to be created for listening.
+# source = "/var/run/suricata-stats.sock"
+#
+# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+# # becomes "detect_alert" when delimiter is "_".
+# delimiter = "_"
+#
+# ## Detect alert logs
+# # alerts = false
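The percentiles and percentile_limit options in the statsd section above boil down to rank selection over the cached timing values. A nearest-rank sketch of that math; the plugin's exact selection method may differ:

```go
package main

import (
	"fmt"
	"sort"
)

// percentile returns the value at the requested percentile rank from
// the tracked timing values (up to percentile_limit of them).
func percentile(values []float64, p float64) float64 {
	sort.Float64s(values)
	if p >= 100 {
		return values[len(values)-1]
	}
	rank := int(p / 100 * float64(len(values)))
	return values[rank]
}

func main() {
	timings := []float64{3, 1, 4, 1, 5, 9, 2, 6, 5, 3}
	fmt.Println(percentile(timings, 50.0))  // 4
	fmt.Println(percentile(timings, 90.0))  // 9
	fmt.Println(percentile(timings, 100.0)) // 9
}
```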
+
+
+# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
+# [[inputs.syslog]]
+# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
+# ## Protocol, address and port to host the syslog receiver.
+# ## If no host is specified, then localhost is used.
+# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# server = "tcp://:6514"
+#
+# ## TLS Config
+# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # keep_alive_period = "5m"
+#
+# ## Maximum number of concurrent connections (default = 0).
+# ## 0 means unlimited.
+# ## Only applies to stream sockets (e.g. TCP).
+# # max_connections = 1024
+#
+# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+# ## 0 means unlimited.
+# # read_timeout = "5s"
+#
+# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+# ## Must be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## Whether to parse in best effort mode or not (default = false).
+# ## By default best effort parsing is off.
+# # best_effort = false
+#
+# ## The RFC standard to use for message parsing
+# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
+# ## Must be one of "RFC5424", or "RFC3164".
+# # syslog_standard = "RFC5424"
+#
+# ## Character to prepend to SD-PARAMs (default = "_").
+# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+# ## For each combination a field is created.
+# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+# # sdparam_separator = "_"
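Octet-counting framing, as referenced by the framing option above, prefixes each message with its byte length so message boundaries survive a TCP stream. A minimal reader for that framing; a sketch of the technique, not the plugin's parser:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// readFrame consumes one "<length> <message>" frame from the stream.
func readFrame(r *bufio.Reader) (string, error) {
	prefix, err := r.ReadString(' ') // decimal length up to the space
	if err != nil {
		return "", err
	}
	var n int
	if _, err := fmt.Sscanf(prefix, "%d", &n); err != nil {
		return "", err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return "", err
	}
	return string(buf), nil
}

func main() {
	r := bufio.NewReader(strings.NewReader("11 hello world5 12345"))
	for {
		msg, err := readFrame(r)
		if err != nil {
			break // io.EOF once the stream is drained
		}
		fmt.Printf("%q\n", msg) // "hello world", then "12345"
	}
}
```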
+
+
+# # Parse the new lines appended to a file
+# [[inputs.tail]]
+# ## File names or a pattern to tail.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ## "/var/log/log[!1-2]*" -> tail files without 1-2
+# ## "/var/log/log[^1-2]*" -> identical behavior as above
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/mymetrics.out"]
+#
+# ## Read file from beginning.
+# # from_beginning = false
+#
+# ## Whether file is a named pipe
+# # pipe = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set based on the number of metrics on each
+# ## line and the size of the output's metric_batch_size.
+# # max_undelivered_lines = 1000
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+# # path_tag = "path"
+#
+# ## multiline parser/codec
+# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+# #[inputs.tail.multiline]
+# ## The pattern should be a regexp which matches what you believe to be an
+# ## indicator that the field is part of an event consisting of multiple lines of log data.
+# #pattern = "^\s"
+#
+# ## This field must be either "previous" or "next".
+# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+# ## whereas "next" indicates that the line belongs to the next one.
+# #match_which_line = "previous"
+#
+# ## The invert_match field can be true or false (defaults to false).
+# ## If true, a message not matching the pattern will constitute a match of the multiline
+# ## filter and the what will be applied. (vice-versa is also true)
+# #invert_match = false
+#
+# ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+# ## is found to start a new event. The default timeout is 5s.
+# #timeout = 5s
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
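The tail plugin's multiline settings above fold continuation lines into the preceding event. A sketch of the match_which_line = "previous" rule, using the `^\s` pattern from the comments; illustrative, not the plugin's implementation:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Indented lines are continuations of the previous event.
	continuation := regexp.MustCompile(`^\s`)

	lines := []string{
		"panic: runtime error",
		"  goroutine 1 [running]:",
		"  main.main()",
		"next standalone event",
	}

	var events []string
	for _, line := range lines {
		if continuation.MatchString(line) && len(events) > 0 {
			// The line belongs to the previous event.
			events[len(events)-1] += "\n" + line
		} else {
			events = append(events, line)
		}
	}
	for _, e := range events {
		fmt.Println(strings.ReplaceAll(e, "\n", " | "))
	}
}
```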
+# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. 
+# # custom_attribute_include = []
+# # custom_attribute_exclude = ["*"]
+#
+# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In
+# ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported
+# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
+# ## it too much may cause performance issues.
+# # metric_lookback = 3
+#
+# ## Optional SSL Config
+# # ssl_ca = "/path/to/cafile"
+# # ssl_cert = "/path/to/certfile"
+# # ssl_key = "/path/to/keyfile"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## The Historical Interval value must match EXACTLY the interval in the daily
+# # "Interval Duration" found on the vCenter server under Configure > General > Statistics > Statistic intervals
+# # historical_interval = "5m"
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+# ## Address and port to host Webhook listener on
+# service_address = ":1619"
+#
+# [inputs.webhooks.filestack]
+# path = "/filestack"
+#
+# [inputs.webhooks.github]
+# path = "/github"
+# # secret = ""
+#
+# [inputs.webhooks.mandrill]
+# path = "/mandrill"
+#
+# [inputs.webhooks.rollbar]
+# path = "/rollbar"
+#
+# [inputs.webhooks.papertrail]
+# path = "/papertrail"
+#
+# [inputs.webhooks.particle]
+# path = "/particle"
+
+
+# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
+# [[inputs.zipkin]]
+# # path = "/api/v1/spans" # URL path for span data
+# # port = 9411 # Port on which Telegraf listens
+
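The filter.go change that follows threads configurable include/exclude defaults through IncludeExcludeFilter (and switches hasMeta to strings.ContainsAny over the magic glob characters "*?["). A standalone sketch of the resulting match semantics, using path.Match globs in place of the package's compiled filters; this illustrates the behavior, it is not the package's API:

```go
package main

import (
	"fmt"
	"path"
)

// match mirrors IncludeExcludeFilter.Match with an includeDefault:
// include wins first, exclude vetoes, and the default decides when no
// include patterns are configured at all.
func match(include, exclude []string, includeDefault bool, s string) bool {
	if len(include) > 0 {
		ok := false
		for _, p := range include {
			if m, _ := path.Match(p, s); m {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	} else if !includeDefault {
		return false // nothing included and the default rejects
	}
	for _, p := range exclude {
		if m, _ := path.Match(p, s); m {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(match([]string{"cpu*"}, []string{"cpu_guest"}, true, "cpu_user"))  // true
	fmt.Println(match([]string{"cpu*"}, []string{"cpu_guest"}, true, "cpu_guest")) // false
	fmt.Println(match(nil, nil, false, "mem"))                                     // false
}
```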
diff --git a/filter/filter.go b/filter/filter.go
index df171257bc789..984fa3ed08f70 100644
--- a/filter/filter.go
+++ b/filter/filter.go
@@ -47,7 +47,7 @@ func Compile(filters []string) (Filter, error) {
 // hasMeta reports whether path contains any magic glob characters.
 func hasMeta(s string) bool {
-	return strings.IndexAny(s, "*?[") >= 0
+	return strings.ContainsAny(s, "*?[")
 }
 
 type filter struct {
@@ -79,13 +79,24 @@ func compileFilterNoGlob(filters []string) Filter {
 }
 
 type IncludeExcludeFilter struct {
-	include Filter
-	exclude Filter
+	include        Filter
+	exclude        Filter
+	includeDefault bool
+	excludeDefault bool
 }
 
 func NewIncludeExcludeFilter(
 	include []string,
 	exclude []string,
+) (Filter, error) {
+	return NewIncludeExcludeFilterDefaults(include, exclude, true, false)
+}
+
+func NewIncludeExcludeFilterDefaults(
+	include []string,
+	exclude []string,
+	includeDefault bool,
+	excludeDefault bool,
 ) (Filter, error) {
 	in, err := Compile(include)
 	if err != nil {
@@ -97,7 +108,7 @@ func NewIncludeExcludeFilter(
 		return nil, err
 	}
 
-	return &IncludeExcludeFilter{in, ex}, nil
+	return &IncludeExcludeFilter{in, ex, includeDefault, excludeDefault}, nil
 }
 
 func (f *IncludeExcludeFilter) Match(s string) bool {
@@ -105,12 +116,17 @@ func (f *IncludeExcludeFilter) Match(s string) bool {
 	if !f.include.Match(s) {
 		return false
 	}
+	} else if !f.includeDefault {
+		return false
 	}
 
 	if f.exclude != nil {
 		if f.exclude.Match(s) {
 			return false
 		}
+	} else if f.excludeDefault {
+		return false
 	}
+
 	return true
 }
diff --git a/go.mod b/go.mod
index 2cae7859a7d66..7022a814b825f 100644
--- a/go.mod
+++ b/go.mod
@@ -1,161 +1,374 @@
 module github.com/influxdata/telegraf
 
-go 1.15
+go 1.17
 
 require (
-	cloud.google.com/go v0.53.0
-	cloud.google.com/go/datastore v1.1.0 // indirect
-	cloud.google.com/go/pubsub v1.2.0
+	cloud.google.com/go v0.93.3 // indirect
+	cloud.google.com/go/bigquery v1.8.0
+	cloud.google.com/go/monitoring v0.2.0
+	cloud.google.com/go/pubsub v1.17.0
 	code.cloudfoundry.org/clock v1.0.0 // indirect
-	collectd.org v0.3.0
-	github.com/Azure/azure-event-hubs-go/v3 v3.2.0
-	github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687
+	collectd.org v0.5.0
+	github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect
+	github.com/Azure/azure-event-hubs-go/v3 v3.3.13
+	github.com/Azure/azure-kusto-go v0.4.0
+	github.com/Azure/azure-pipeline-go v0.2.3 // indirect
+	github.com/Azure/azure-sdk-for-go v52.5.0+incompatible // indirect
+	github.com/Azure/azure-storage-blob-go v0.14.0 // indirect
+	github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd
+	github.com/Azure/go-amqp v0.13.12 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
-	github.com/Azure/go-autorest/autorest v0.9.3
-	github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
-	github.com/BurntSushi/toml v0.3.1
+	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+	github.com/Azure/go-autorest/autorest v0.11.18
+	github.com/Azure/go-autorest/autorest/adal v0.9.16
+	github.com/Azure/go-autorest/autorest/azure/auth v0.5.8
+	github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect
+	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
+	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
+	github.com/Azure/go-autorest/logger v0.2.1 // indirect
+	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+	github.com/BurntSushi/toml v0.4.1
 	github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
-	github.com/Microsoft/ApplicationInsights-Go v0.4.2
-	github.com/Microsoft/go-winio v0.4.9 // indirect
-	github.com/Shopify/sarama v1.24.1
-	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
+	github.com/Microsoft/go-winio
v0.4.17 // indirect + github.com/Microsoft/hcsshim v0.8.21 // indirect + github.com/Shopify/sarama v1.29.1 + github.com/StackExchange/wmi v1.2.1 // indirect github.com/aerospike/aerospike-client-go v1.27.0 - github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 + github.com/alecthomas/participle v0.4.1 // indirect + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 + github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 - github.com/apache/thrift v0.12.0 + github.com/antchfx/jsonquery v1.1.4 + github.com/antchfx/xmlquery v1.3.6 + github.com/antchfx/xpath v1.1.11 + github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 // indirect + github.com/apache/thrift v0.15.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 - github.com/armon/go-metrics v0.3.0 // indirect - github.com/aws/aws-sdk-go v1.33.12 - github.com/benbjohnson/clock v1.0.3 + github.com/armon/go-metrics v0.3.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.9.2 + github.com/aws/aws-sdk-go-v2/config v1.8.3 + github.com/aws/aws-sdk-go-v2/credentials v1.4.3 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 + github.com/aws/smithy-go v1.8.0 + github.com/benbjohnson/clock v1.1.0 + github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 - github.com/caio/go-tdigest v2.3.0+incompatible // indirect - github.com/cenkalti/backoff v2.0.0+incompatible // indirect + github.com/caio/go-tdigest v3.1.0+incompatible + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/cockroachdb/apd v1.1.0 // indirect - github.com/containerd/containerd v1.4.1 // indirect - github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 - github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect - github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect - github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 - github.com/dgrijalva/jwt-go v3.2.0+incompatible - github.com/dimchansky/utfbom v1.1.0 - github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect - 
github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible - github.com/docker/go-connections v0.3.0 // indirect - github.com/docker/go-units v0.3.3 // indirect - github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 - github.com/eclipse/paho.mqtt.golang v1.2.0 - github.com/ericchiang/k8s v1.2.0 + github.com/containerd/cgroups v1.0.1 // indirect + github.com/containerd/containerd v1.5.7 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/couchbase/go-couchbase v0.1.0 + github.com/couchbase/gomemcached v0.1.3 // indirect + github.com/couchbase/goutils v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denisenkom/go-mssqldb v0.10.0 + github.com/devigned/tab v0.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/dimchansky/utfbom v1.1.1 + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v20.10.9+incompatible + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/echlebek/timeproxy v1.0.0 // indirect + github.com/eclipse/paho.mqtt.golang v1.3.0 + github.com/fatih/color v1.10.0 // indirect + github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 - github.com/go-logfmt/logfmt v0.4.0 - github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.0 + github.com/go-logr/logr v0.4.0 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect + github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible - github.com/go-sql-driver/mysql v1.5.0 - github.com/goburrow/modbus v0.1.0 + github.com/go-sql-driver/mysql v1.6.0 + github.com/go-stack/stack v1.8.1 // indirect + github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 - github.com/gofrs/uuid v2.1.0+incompatible - github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/gofrs/uuid v3.3.0+incompatible + github.com/golang-jwt/jwt/v4 v4.1.0 + github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/protobuf v1.3.5 - github.com/google/go-cmp v0.4.0 + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.4 + github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 - github.com/gopcua/opcua v0.1.12 - github.com/gorilla/mux v1.6.2 + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect + github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 + github.com/gorilla/mux v1.8.0 + github.com/gorilla/websocket v1.4.2 + github.com/gosnmp/gosnmp v1.33.0 + github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b + 
github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul v1.2.1 + github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec + github.com/hashicorp/consul/api v1.9.1 + github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-hclog v0.16.2 // indirect + github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect - github.com/hashicorp/memberlist v0.1.5 // indirect - github.com/hashicorp/serf v0.8.1 // indirect - github.com/influxdata/go-syslog/v2 v2.0.1 - github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/serf v0.9.5 // indirect + github.com/influxdata/go-syslog/v3 v3.0.0 + github.com/influxdata/influxdb-observability/common v0.2.8 + github.com/influxdata/influxdb-observability/influx2otel v0.2.8 + github.com/influxdata/influxdb-observability/otel2influx v0.2.8 + github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 - github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect - github.com/jackc/pgx v3.6.0+incompatible + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.5.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.0.1 // indirect + github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect + github.com/jackc/pgtype v1.3.0 // indirect + github.com/jackc/pgx/v4 v4.6.0 + github.com/jaegertracing/jaeger v1.26.0 // indirect + github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca + github.com/jmespath/go-jmespath v0.4.0 + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.11 // indirect github.com/kardianos/service v1.0.0 - github.com/karrick/godirwalk v1.12.0 + github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.9.2 // indirect - github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee + github.com/klauspost/compress v1.13.6 // indirect + github.com/kr/pretty v0.3.0 // indirect + github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect - github.com/lib/pq v1.3.0 // indirect - github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect - github.com/mattn/go-sqlite3 v1.14.0 - github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe - github.com/miekg/dns v1.0.14 - github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + 
github.com/mattn/go-colorable v0.1.8 // indirect + github.com/mattn/go-ieproxy v0.0.1 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 + github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b + github.com/mdlayher/genetlink v1.0.0 // indirect + github.com/mdlayher/netlink v1.1.0 // indirect + github.com/microsoft/ApplicationInsights-Go v0.4.4 + github.com/miekg/dns v1.1.43 + github.com/minio/highwayhash v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/moby/ipvs v1.0.1 + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.4.1 // indirect + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nats-io/nats-server/v2 v2.1.4 - github.com/nats-io/nats.go v1.9.1 - github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 - github.com/nsqio/go-nsq v1.0.7 + github.com/nats-io/jwt/v2 v2.0.2 // indirect + github.com/nats-io/nats-server/v2 v2.2.6 + github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 + github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v1.0.2 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect - github.com/opentracing/opentracing-go v1.0.2 // indirect - github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/opentracing/opentracing-go v1.2.0 + github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 + github.com/openzipkin/zipkin-go v0.2.5 + github.com/philhofer/fwd v1.1.1 // indirect + github.com/pierrec/lz4 v2.6.0+incompatible // indirect + github.com/pion/dtls/v2 v2.0.9 + github.com/pion/logging v0.2.2 // indirect + github.com/pion/transport v0.12.3 // indirect + github.com/pion/udp v0.1.1 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.9.1 - github.com/prometheus/procfs v0.0.8 + github.com/prometheus/common v0.31.1 + github.com/prometheus/procfs v0.6.0 + github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/riemann/riemann-go-client v0.5.0 + github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 - github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect + github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect 
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect - github.com/shirou/gopsutil v2.20.9+incompatible + github.com/sensu/sensu-go/api/core/v2 v2.9.0 + github.com/shirou/gopsutil v3.21.8+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect - github.com/sirupsen/logrus v1.4.2 - github.com/soniah/gosnmp v1.25.0 - github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 - github.com/stretchr/testify v1.5.1 + github.com/showwin/speedtest-go v1.1.4 + github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect + github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect + github.com/signalfx/golib/v3 v3.3.38 + github.com/signalfx/sapm-proto v0.7.2 // indirect + github.com/sirupsen/logrus v1.8.1 + github.com/sleepinggenius2/gosmi v0.4.3 + github.com/snowflakedb/gosnowflake v1.6.2 + github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect - github.com/tidwall/gjson v1.6.0 - github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect - github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect - github.com/vjeantet/grok v1.0.0 - github.com/vmware/govmomi v0.19.0 - github.com/wavefronthq/wavefront-sdk-go v0.9.2 + github.com/testcontainers/testcontainers-go v0.11.1 + github.com/tidwall/gjson v1.10.2 + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tinylib/msgp v1.1.6 + github.com/tklauser/go-sysconf v0.3.9 // indirect + github.com/tklauser/numcpus v0.3.0 // indirect + github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 + github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect + github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect + github.com/vjeantet/grok v1.0.1 + github.com/vmware/govmomi v0.26.0 + github.com/wavefronthq/wavefront-sdk-go v0.9.7 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect - github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect - go.starlark.net v0.0.0-20200901195727-6e684ef5eeee - golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200707034311-ab3426394381 - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a - golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 - golang.org/x/text v0.3.3 - golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.0.2 // indirect + github.com/xdg-go/stringprep v1.0.2 // indirect + github.com/xdg/scram v1.0.3 + github.com/xdg/stringprep v1.0.3 // indirect + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect + go.etcd.io/etcd/api/v3 v3.5.0 // indirect + go.mongodb.org/mongo-driver v1.5.3 + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 + go.opentelemetry.io/otel/metric v0.24.0 + go.opentelemetry.io/otel/sdk/metric v0.24.0 + 
go.starlark.net v0.0.0-20210406145628-7a1108eaa012 + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 + golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect + golang.org/x/text v0.3.7 + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + golang.org/x/tools v0.1.5 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 - gonum.org/v1/gonum v0.6.2 // indirect - google.golang.org/api v0.20.0 - google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 - google.golang.org/grpc v1.28.0 + google.golang.org/api v0.54.0 + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 + google.golang.org/grpc v1.41.0 + google.golang.org/protobuf v1.27.1 + gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect + gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 - gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/ldap.v3 v3.1.0 - gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/olivere/elastic.v5 v5.0.70 - gopkg.in/yaml.v2 v2.2.5 - gotest.tools v2.2.0+incompatible // indirect - honnef.co/go/tools v0.0.1-2020.1.3 // indirect - k8s.io/apimachinery v0.17.1 // indirect + gopkg.in/sourcemap.v1 v1.0.5 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect + gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gotest.tools v2.2.0+incompatible + k8s.io/api v0.22.2 + k8s.io/apimachinery v0.22.2 + k8s.io/client-go v0.22.2 + k8s.io/klog/v2 v2.9.0 // indirect + k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect + modernc.org/cc/v3 v3.33.5 // indirect + modernc.org/ccgo/v3 v3.9.4 // indirect + modernc.org/libc v1.9.5 // indirect + modernc.org/mathutil v1.2.2 // indirect + modernc.org/memory v1.0.4 // indirect + modernc.org/opt v0.1.1 // indirect + modernc.org/sqlite v1.10.8 + modernc.org/strutil v1.1.0 // indirect + modernc.org/token v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect +) + +require ( + github.com/aws/aws-sdk-go v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect + github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect + github.com/cenkalti/backoff/v4 v4.1.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/pierrec/lz4/v4 v4.1.8 // indirect + 
github.com/rogpeppe/go-internal v1.6.2 // indirect
+ go.opentelemetry.io/otel v1.0.1 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect
+ go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.0.1 // indirect
+ go.opentelemetry.io/otel/sdk/export/metric v0.24.0 // indirect
+ go.opentelemetry.io/otel/trace v1.0.1 // indirect
+ go.opentelemetry.io/proto/otlp v0.9.0 // indirect
)

// replaced due to https://github.com/satori/go.uuid/issues/73
replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible
+
+// replaced due to https://github.com/mdlayher/apcupsd/issues/10
+replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e
+
+// proxy.golang.org has versions of golang.zx2c4.com/wireguard with leading v's, whereas the git repo has tags without leading v's: https://git.zx2c4.com/wireguard-go/refs/tags
+// So, fetching this module with version v0.0.20200121 (as done by the transitive dependency
+// https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12) was not working when using GOPROXY=direct.
+// Replacing with the pseudo-version works around this.
+replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090
+
+// replaced due to open PR updating protobuf https://github.com/cisco-ie/nx-telemetry-proto/pull/1
+replace github.com/cisco-ie/nx-telemetry-proto => github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc
+
+// replaced due to open PR updating protobuf https://github.com/riemann/riemann-go-client/pull/27
+replace github.com/riemann/riemann-go-client => github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754
diff --git a/go.sum b/go.sum
index 2c024984b9046..f3bb8cd284287 100644
--- a/go.sum
+++ b/go.sum
@@ -1,592 +1,2242 @@
+4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod 
h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0= +cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c= +cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk= +cloud.google.com/go/monitoring v0.2.0/go.mod h1:K/JoZWY3xszHf38AMkzZGx1n5eT1/57ilElGMpESsEE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.17.0 h1:uGzqGUGvaSJ3APz5BmLFw1LpSTnB9o+EzE5fI3rBbJI= +cloud.google.com/go/pubsub v1.17.0/go.mod h1:bBIeYx9ftf/hr7eoSUim6cRaOYZE/hHuigwdwLLByi8= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= 
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= +collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= +github.com/Azure/azure-event-hubs-go/v3 v3.3.13/go.mod h1:dJ/WqDn0KEJkNznL9UT/UbXzfmkffCjSNl9x2Y8JI28= +github.com/Azure/azure-kusto-go v0.4.0 h1:CivPswdkVzSXzEjzJTyOJ6e5RhI4IKvaszilyNGvs+A= +github.com/Azure/azure-kusto-go v0.4.0/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= +github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= -github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= -github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= -github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-amqp v0.13.12 h1:u/m0QvBgNVlcMqj4bPHxtEyANOzS+cXXndVMYGsC29A= +github.com/Azure/go-amqp v0.13.12/go.mod h1:D5ZrjQqB1dyp1A+G73xeL/kNn7D5qHJIIsNNps7YNmk= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod 
h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to 
v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
+github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
+github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w=
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM=
-github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg=
-github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg=
-github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
-github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI=
-github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU=
+github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
+github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU=
+github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0=
+github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE=
github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/go-thrift v0.0.0-20170109061633-7914173639b2/go.mod h1:CxCgO+NdpMdi9SsTlGbc0W+/UNxO3I0AabOEJZ3w61w=
+github.com/alecthomas/kong v0.2.1/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI=
+github.com/alecthomas/participle v0.4.1 h1:P2PJWzwrSpuCWXKnzqvw0b0phSfH1kJo4p2HvLynVsI=
+github.com/alecthomas/participle v0.4.1/go.mod 
h1:T8u4bQOSMwrkTWOSyt8/jSFPEnRtd0FKFMjVfYBlqPs= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/alecthomas/repr v0.0.0-20210301060118-828286944d6a/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= +github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= +github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo= +github.com/antchfx/xmlquery v1.3.6/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= +github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/apache/arrow/go/arrow 
v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 h1:nPUln5QTzhftSpmld3xcXw/GOJ3z1E8fR8tUrrc0YWk= +github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.15.0 h1:aGvdaR0v1t9XLgjtBYwxcBvBOTMqClzwE26CHOgjW1Y= +github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apex/log v1.6.0/go.mod h1:x7s+P9VtvFBXge9Vbn+8TrqKmuzmD35TTkeBHul8UtY= +github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4= -github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator 
v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k= +github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= +github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= +github.com/aws/aws-sdk-go-v2/config v1.8.3 h1:o5583X4qUfuRrOGOgmOcDgvr5gJVSu57NK08cWAhIDk= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= +github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3 h1:LTdD5QhK073MpElh9umLLP97wxphkgVC/OjQaEbBwZA= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= 
+github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 h1:9tfxW/icbSu98C2pcNynm5jmDwU3/741F11688B6QnU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.2/go.mod h1:1QsSZvLUuaQ6VJsCXolYCEzV0mVBkNBp64pIJy9yRks= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.2/go.mod h1:1QsSZvLUuaQ6VJsCXolYCEzV0mVBkNBp64pIJy9yRks= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 h1:IM9b6hlCcVFJFydPoyphs/t7YrHfqKy7T4/7AG5Eprs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E4n//McF+mEgNrYg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 h1:B120/boLr82yRaQFEPn9u01OwWMnc+xGvz5SOHfBrHY= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2/go.mod h1:td1djV1rAzEPcit9L8urGneIi2pYvtI7b/kfMWdpe84= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 h1:SGwKUQaJudQQZE72dDQlL2FGuHNAEK1CyqKLTjh6mqE= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod 
h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.0.2/go.mod h1:Gej5xRE+MK0r35OnxJJ07iqQ5JC1avTW/4MwGfsC2io=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 h1:QCPbsMPMcM4iGbui5SH6O4uxvZffPoBJ4CIGX7dU0l4=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 h1:r7jel2aa4d9Duys7wEmWqDd5ebpC9w6Kxu6wIjjp18E=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 h1:hb+NupVMUzINGUCfDs2+YqMkWKu47dBIQHpulM0XWh4=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 h1:pZwkxZbspdqRGzddDB92bkZBoB7lg85sMRE7OqdB3V0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 h1:ol2Y5DWqnJeKqNd8th7JWzBtqu63xpOfs1Is+n1t8/4=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
+github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY=
+github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2/go.mod h1:XoDkdZ5pBf2za2GWbFHQ8Ps0K8fRbmbwrHh7PF5xnzQ=
+github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
+github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo=
+github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
 github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
 github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI=
+github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY=
-github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
-github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY=
-github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
+github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
+github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo=
-github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ=
-github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
-github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4=
-github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
-github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8=
-github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8=
+github.com/couchbase/go-couchbase v0.1.0/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
+github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q7JzdEY=
+github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo=
+github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE=
+github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
+github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o=
-github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE=
+github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
+github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/devigned/tab v0.0.1/go.mod h1:oVYrfgGyond090gxCvvbjZji79+peOiSV6vhZhKJM0Y=
 github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/devigned/tab/opencensus v0.1.2/go.mod h1:U6xXMXnNwXJpdaK0mnT3zdng4WTi+vCfqn7YHofEv2A=
+github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
+github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E=
+github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE=
+github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg=
-github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible h1:SiUATuP//KecDjpOK2tvZJgeScYAklvyjfK8JZlU6fo=
-github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
-github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
+github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k=
+github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
+github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 h1:27379cxrsKlr7hAnW/xrusefspUPjqHVRW1K/bZgfGw=
+github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1:8Ia4zp86glrUhC29AAdK9hwTYh8RB6v0WRCtpplYqEg=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU=
+github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ=
+github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 h1:aDtw0/++yjOoiXB9sldaFYW61mK3m6ia/wYWxPLrwYY=
+github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754/go.mod h1:4rS0vfmzOMwfFPhi6Zve4k/59TsBepqd6WESNULE0ho=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 h1:q2Ayh9s6Cr75bS5URiOUAoyFXemgKQaBJphbhAaJHCY=
+github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
+github.com/echlebek/crock v1.0.1 h1:KbzamClMIfVIkkjq/GTXf+N16KylYBpiaTitO3f1ujg=
+github.com/echlebek/crock v1.0.1/go.mod h1:/kvwHRX3ZXHj/kHWJkjXDmzzRow54EJuHtQ/PapL/HI=
+github.com/echlebek/timeproxy v1.0.0 h1:V41/v8tmmMDNMA2GrBPI45nlXb3F7+OY+nJz1BqKsCk=
+github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk=
 github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I=
+github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI=
-github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk=
+github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
-github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
-github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8=
-github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk=
+github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og=
+github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4=
+github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
+github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
+github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
+github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ=
+github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls=
+github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
+github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
+github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8=
+github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4=
+github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI=
+github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
+github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0=
+github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI=
+github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
 github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
 github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
+github.com/go-redis/redis/v8 v8.0.0-beta.6/go.mod h1:g79Vpae8JMzg5qjk8BiwU9tK+HmU3iDVyS4UAJLFycI=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
+github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
+github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro=
 github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg=
 github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA=
 github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA=
-github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
 github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.38.0/go.mod h1:Knp/sd5ATrVp7EOzWzwIIFH+c8hUfpW+oOQb8NvdZDo= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/flatbuffers v1.11.0/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gopcua/opcua v0.1.12 h1:TenluCr1CPB1NHjb9tX6yprc0eUmthznXxSc5mnJPBo= -github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= +github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= +github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= 
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosnmp/gosnmp v1.33.0 h1:WNwN5Rj/9Y70VplIKXuaUiYVxdcaXhfAuLElKx4lnpU= +github.com/gosnmp/gosnmp v1.33.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3ncgie3IgFTO9AzV8PMMEWESFM5c= +github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= +github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5U4sy2roxBPQDjNiw4od7xlsABQ= +github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= -github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= -github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg= -github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec h1:ya+kv1eNnd5QhcHuaj5g5eMq5Ra3VCNaPY2ZI7Aq91o= +github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec/go.mod h1:FIT1uhdVv2iXO0l6aACPZSVHxdth7RdmoT34jk9MEm0= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME= +github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0 
h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= -github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= -github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4= -github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= -github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= -github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 h1:K3A5vHPs/p8OjI4SL3l1+hs/98mhxTVDcV1Ap0c265E= -github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= +github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= +github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= +github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-observability/common v0.2.8 h1:QDvX7rNQkt1mHr2v8sw/OEupa32CxZHlO5f/tsyPCLw= +github.com/influxdata/influxdb-observability/common v0.2.8/go.mod h1:N2wfkPgJvi9CPK6MbNFkD70naEUxAMGCqFyxZXCJQDs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8 h1:XlVo4WLIFByOADn+88hPmR2SGJkdLppyIbw1BG2obp8= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8/go.mod h1:t9LeYL1mBiVRZBt5TfIj+4MBkJ/1POBxUlKSxEA+uj8= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8 h1:vTamg9mKUXHaXPtydrR1ejpqj/OKAGc56MiedXjlsnA= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8/go.mod h1:xKTR9GLOtkSekysDKhAFNrPYpeiFV31Sy6zDqF54axA= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 h1:0rQOs1VHLVFpAAOIR0mJEvVOIaMYFgYdreeVbgI9sII= +github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= -github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod 
h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.5.0 h1:oFSOilzIZkyg787M1fEmyMfOUUvwj0daqYMfaWwNL4o= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1 h1:Rdjp4NFjwHnEslx2b66FfCI2S0LhO4itac3hXz6WX9M= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.3.0 h1:l8JvKrby3RI7Kg3bYEeU9TA4vqC38QDpFCfcrC7KuN0= +github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod 
h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.6.0 h1:Fh0O9GdlG4gYpjpwOqjdEodJUQM9jzN3Hdv7PN0xmm0= +github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k= +github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw= +github.com/jaegertracing/jaeger v1.26.0 h1:4LbUdb9l/Mx83zYvjLbkrayheX+Aga26NEI+feo3xzA= +github.com/jaegertracing/jaeger v1.26.0/go.mod h1:SwHsl1PLZVAdkQTPrziQ+4xV9FxzJXRvTDW1YrUIWEA= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= +github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jingyugao/rowserrcheck v0.0.0-20210130005344-c6a0c12dd98d/go.mod h1:/EZlaYCnEX24i7qdVhT9du5JrtFWYRQr67bVgR7JJC8= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors 
v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= +github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= -github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y= -github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY= -github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress 
v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk= -github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 
h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA= -github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= -github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.1/go.mod 
h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6 
h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= -github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs=
+github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0=
+github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
+github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
+github.com/moby/term
v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20201209171846-0547674fceff/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= -github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= -github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ= +github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= 
+github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI=
+github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats-server/v2 v2.2.6 h1:FPK9wWx9pagxcw14s8W9rlfzfyHm61uNLnJyybZbn48=
+github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nats.go v1.11.0 h1:L263PZkrmkRJRJT2YHU8GwWWvEvmr9/LUKuJTXsF32k=
+github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
+github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY=
-github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
-github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=
-github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/nbutton23/zxcvbn-go v0.0.0-20201221231540-e56b841a3c88/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY=
+github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ=
+github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
+github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE=
+github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk=
+github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter
v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod 
h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
-github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
+github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g=
-github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU=
+github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
+github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw=
+github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
+github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
-github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4=
+github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pion/dtls/v2 v2.0.9 h1:7Ow+V++YSZQMYzggI0P9vLJz/hUFcffsfGMfT/Qy+u8=
+github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho=
+github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q=
+github.com/pion/transport v0.12.3 h1:vdBfvfU/0Wq8kd2yhUMSDB/x+O4Z9MYVl2fJ5BT4JZw=
+github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A=
+github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o=
+github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M=
+github.com/pkg/browser
v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0 
h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs= +github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 h1:AHi2TGs09Mv4v688/bjcY2PfAcu9+p4aPvsgVQ4nYDk=
+github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec=
+github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
+github.com/quasilyte/go-ruleguard v0.3.0/go.mod h1:p2miAhLp6fERzFNbcuQ4bevXs8rgK//uCHsUDkumITg=
+github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210106184943-e47d54850b18/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210115110123-c73ee1cbff1f/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/robertkrimen/otto
v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= +github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= 
+github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc h1:9RAsqOFf0U5CuwXR/Jff3nXTv6tAQNN7U4A/2cBRXFc= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc/go.mod h1:rJDd05J5hqWVU9MjJ+5jw1CuLn/jRhvU0xtFEzzqjwM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ= -github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= +github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvhwjdr3wSF9Vg= +github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= +github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= +github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= 
+github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8=
+github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c=
+github.com/signalfx/golib/v3 v3.3.38 h1:4EukKPAxVsqlkfaetUv+BpbuJ2l0YeQbwiQg3ADtlzU=
+github.com/signalfx/golib/v3 v3.3.38/go.mod h1:J7vY30VdC39CSin5ZRIrThnkyNW8x1fnJGD+NBW4LuY=
+github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw=
+github.com/signalfx/sapm-proto v0.7.2 h1:iM/y3gezQm1/j7JBS0gXhEJ8ROeneb6DY7n0OcnvLks=
+github.com/signalfx/sapm-proto v0.7.2/go.mod h1:HLufOh6Gd2altGxbeve+s6hh0EWCWoOM7MmuYuvs5PI=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ=
-github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o=
+github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
+github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
+github.com/snowflakedb/gosnowflake v1.6.2 h1:drZkX7Ve3qr3lLD/f0vxwesgJZfNerivknAvPRAMy88=
+github.com/snowflakedb/gosnowflake v1.6.2/go.mod h1:k1Wq+O8dRD/jmFBLyStEv2OrgHoMFQpqHCRSy70P0dI=
+github.com/soheilhy/cmux v0.1.4/go.mod
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ=
-github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stoewer/go-strcase v1.2.0/go.mod
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= 
-github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
+github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
+github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08=
+github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8=
+github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4=
+github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo=
+github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw=
+github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
+github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
+github.com/tj/go-buffer v1.0.1/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc=
+github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
+github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
+github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
+github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
+github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
+github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
+github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
+github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tomarrell/wrapcheck v0.0.0-20201130113247-1683564d9756/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0=
+github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0=
-github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4=
+github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
+github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY=
+github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8=
+github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70=
+github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ=
-github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo=
-github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY=
-github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
-github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk=
-github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4=
+github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo=
+github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw=
+github.com/vmware/govmomi v0.26.0/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o=
+github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk=
+github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
+github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI=
+github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q=
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg=
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk=
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4=
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to=
+github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4=
+github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
+github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk=
-github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e h1:oIpIX9VKxSCFrfjsKpluGbNPBGq9iNnT9crH781j9wY=
+github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mongodb.org/mongo-driver v1.5.3 h1:wWbFB6zaGHpzguF3f7tW94sVE8sFl3lHx8OZx/4OuFI=
+go.mongodb.org/mongo-driver v1.5.3/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.starlark.net v0.0.0-20200901195727-6e684ef5eeee h1:N4eRtIIYHZE5Mw/Km/orb+naLdwAe+lv2HCxRR5rEBw=
-go.starlark.net v0.0.0-20200901195727-6e684ef5eeee/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY=
+go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs=
+go.opentelemetry.io/collector/model v0.37.0 h1:K1G6bgzBZ5kKSjZ1+EY9MhCOYsac4Q1K85fBUgpTVH8=
+go.opentelemetry.io/collector/model v0.37.0/go.mod h1:ESh1oWDNdS4fTg9sTFoYuiuvs8QuaX8yNGTPix3JZc8=
+go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo=
+go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc=
+go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 h1:NN6n2agAkT6j2o+1RPTFANclOnZ/3Z1ruRGL06NYACk=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0/go.mod h1:kgWmavsno59/h5l9A9KXhvqrYxBhiQvJHPNhJkMP46s=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 h1:QyIh7cAMItlzm8xQn9c6QxNEMUbYgXPx19irR/pmgdI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0/go.mod h1:BpCT1zDnUgcUc3VqFVkxH/nkx6cM8XlCPsQsxaOzUNM=
+go.opentelemetry.io/otel/internal/metric v0.24.0 h1:O5lFy6kAl0LMWBjzy3k//M8VjEaTDWL9DPJuqZmWIAA=
+go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk=
+go.opentelemetry.io/otel/metric v0.24.0 h1:Rg4UYHS6JKR1Sw1TxnI13z7q/0p/XAbgIqUTagvLJuU=
+go.opentelemetry.io/otel/metric v0.24.0/go.mod h1:tpMFnCD9t+BEGiWY2bWF5+AwjuAdM0lSowQ4SBA3/K4=
+go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA=
+go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
+go.opentelemetry.io/otel/sdk/export/metric v0.24.0 h1:innKi8LQebwPI+WEuEKEWMjhWC5mXQG1/WpSm5mffSY=
+go.opentelemetry.io/otel/sdk/export/metric v0.24.0/go.mod h1:chmxXGVNcpCih5XyniVkL4VUyaEroUbOdvjVlQ8M29Y=
+go.opentelemetry.io/otel/sdk/metric v0.24.0 h1:LLHrZikGdEHoHihwIPvfFRJX+T+NdrU2zgEqf7tQ7Oo=
+go.opentelemetry.io/otel/sdk/metric v0.24.0/go.mod h1:KDgJgYzsIowuIDbPM9sLDZY9JJ6gqIDWCx92iWV8ejk=
+go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw=
+go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
+go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo=
+go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -594,16 +2244,25 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -613,125 +2272,347 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 h1:G64nFNerDErBd2KdvHvIn3Ee6ccUQBTfhDZEO0DccfU=
+golang.org/x/net v0.0.0-20211005215030-d2e5035098b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8=
-golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs=
+golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -739,28 +2620,96 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM=
-golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210102185154-773b96fafca2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 h1:LJ5Rrj8y0yBul+KpB2v9dFhYuHRs1s9caVu4VK6MgMo= +golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= -gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod 
h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -770,106 +2719,341 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod 
h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc 
v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM= +gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.2.1/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= -gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= -gopkg.in/mgo.v2 
v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= +gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.1/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api 
v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/cc/v3 v3.33.5 h1:gfsIOmcv80EelyQyOHn/Xhlzex8xunhQxWiJRMYmPrI= +modernc.org/cc/v3 v3.33.5/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= +modernc.org/ccgo/v3 v3.9.4 h1:mt2+HyTZKxva27O6T4C9//0xiNQ/MornL3i8itM5cCs= +modernc.org/ccgo/v3 v3.9.4/go.mod h1:19XAY9uOrYnDhOgfHwCABasBvK69jgC4I8+rizbk3Bc= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.5 h1:zv111ldxmP7DJ5mOIqzRbza7ZDl3kh4ncKfASB2jIYY= +modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM= +modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.10.8 h1:tZzV+/FwlSBddiJAHLR+qxsw2nx7jpLMKOCVu6NTjxI= +modernc.org/sqlite v1.10.8/go.mod h1:k45BYY2DU82vbS/dJ24OzHCtjPeMEcZ1DV2POiE8nRs= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/tcl v1.5.2 h1:sYNjGr4zK6cDH74USl8wVJRrvDX6UOLpG0j4lFvR0W0= +modernc.org/tcl 
v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= +modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/info.plist b/info.plist new file mode 100644 index 0000000000000..e1267df8c1788 --- /dev/null +++ b/info.plist @@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>CFBundleExecutable</key>
+	<string>telegraf_entry_mac</string>
+	<key>CFBundleIconFile</key>
+	<string>icon.icns</string>
+	<key>CFBundleIdentifier</key>
+	<string>com.influxdata.telegraf</string>
+	<key>NSHighResolutionCapable</key>
+	<true/>
+	<key>LSUIElement</key>
+	<true/>
+</dict>
+</plist>
\ No newline at end of file
diff --git a/internal/content_coding.go b/internal/content_coding.go index daefa20eea633..b1a30bde1bfe1 100644 --- a/internal/content_coding.go +++ b/internal/content_coding.go @@ -65,7 +65,6 @@ func (r *GzipReader) Read(b []byte) (int, error) { return n, nil } return n, err - } // NewContentEncoder returns a ContentEncoder for the encoding type. 
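The hunk above and the test diff below both touch the streaming content-coding API: NewStreamContentDecoder wraps an io.Reader so callers read decoded bytes directly, with GzipReader servicing the gzip case. A minimal, hedged sketch of the gzip round trip follows, written as a hypothetical example function that would sit next to the tests in package internal; only the "identity" path appears in the test diff below, so the "gzip" branch of NewStreamContentDecoder is an assumption here (imports assumed: bytes, compress/gzip, fmt, io).

func ExampleNewStreamContentDecoder_gzip() {
	// Build a gzip-compressed payload so there is a stream to decode.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("howdy")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Wrap the compressed stream in a decoder; GzipReader.Read (patched
	// above) is what ultimately serves the io.ReadAll call.
	dec, err := NewStreamContentDecoder("gzip", &buf)
	if err != nil {
		panic(err)
	}
	data, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)
	// Output: howdy
}

The "identity" path needs no wrapper at all, which is why the test below can pass the raw reader straight through to io.ReadAll.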
diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 85496df59c5b6..06235a63879a9 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -2,7 +2,7 @@ package internal import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -68,7 +68,7 @@ func TestStreamIdentityDecode(t *testing.T) { dec, err := NewStreamContentDecoder("identity", &r) require.NoError(t, err) - data, err := ioutil.ReadAll(dec) + data, err := io.ReadAll(dec) require.NoError(t, err) require.Equal(t, []byte("howdy"), data) diff --git a/internal/exec_unix.go b/internal/exec_unix.go index d41aae825d6d5..0f5d3fca037db 100644 --- a/internal/exec_unix.go +++ b/internal/exec_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal @@ -50,7 +51,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { // If SIGTERM was sent then treat any process error as a timeout. if termSent { - return TimeoutErr + return ErrTimeout } // Otherwise there was an error unrelated to termination. diff --git a/internal/exec_windows.go b/internal/exec_windows.go index f010bdd96756b..708051dda3a2c 100644 --- a/internal/exec_windows.go +++ b/internal/exec_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal @@ -33,7 +34,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { // If SIGTERM was sent then treat any process error as a timeout. if termSent { - return TimeoutErr + return ErrTimeout } // Otherwise there was an error unrelated to termination. diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index d4e7ffd8743bd..fb49c232ecc0b 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -5,8 +5,8 @@ import ( "path/filepath" "strings" + "github.com/bmatcuk/doublestar/v3" "github.com/gobwas/glob" - "github.com/karrick/godirwalk" ) type GlobPath struct { @@ -45,42 +45,13 @@ func Compile(path string) (*GlobPath, error) { // If it's a static path, returns path. // All returned paths will have the host platform separator. func (g *GlobPath) Match() []string { - if !g.hasMeta { - return []string{g.path} - } - if !g.HasSuperMeta { - files, _ := filepath.Glob(g.path) - return files - } - roots, err := filepath.Glob(g.rootGlob) - if err != nil { - return []string{} - } - out := []string{} - walkfn := func(path string, _ *godirwalk.Dirent) error { - if g.g.Match(path) { - out = append(out, path) - } - return nil + // This string replacement is for backwards compatibility support + // The original implementation allowed **.txt but the doublestar package requires **/**.txt + g.path = strings.ReplaceAll(g.path, "**/**", "**") + g.path = strings.ReplaceAll(g.path, "**", "**/**") - } - for _, root := range roots { - fileinfo, err := os.Stat(root) - if err != nil { - continue - } - if !fileinfo.IsDir() { - if g.MatchString(root) { - out = append(out, root) - } - continue - } - godirwalk.Walk(root, &godirwalk.Options{ - Callback: walkfn, - Unsorted: true, - }) - } - return out + files, _ := doublestar.Glob(g.path) + return files } // MatchString tests the path string against the glob. The path should contain @@ -113,10 +84,10 @@ func (g *GlobPath) GetRoots() []string { // hasMeta reports whether path contains any magic glob characters. func hasMeta(path string) bool { - return strings.IndexAny(path, "*?[") >= 0 + return strings.ContainsAny(path, "*?[") } // hasSuperMeta reports whether path contains any super magic glob characters (**). 
func hasSuperMeta(path string) bool { - return strings.Index(path, "**") >= 0 + return strings.Contains(path, "**") } diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 60562d8f8f1ae..bc286bc75419e 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,53 +1,70 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package globpath import ( + "os" + "path/filepath" "runtime" - "strings" "testing" "github.com/stretchr/testify/require" ) +var ( + testdataDir = getTestdataDir() +) + func TestCompileAndMatch(t *testing.T) { - dir := getTestdataDir() - // test super asterisk - g1, err := Compile(dir + "/**") - require.NoError(t, err) - // test single asterisk - g2, err := Compile(dir + "/*.log") - require.NoError(t, err) - // test no meta characters (file exists) - g3, err := Compile(dir + "/log1.log") - require.NoError(t, err) - // test file that doesn't exist - g4, err := Compile(dir + "/i_dont_exist.log") - require.NoError(t, err) - // test super asterisk that doesn't exist - g5, err := Compile(dir + "/dir_doesnt_exist/**") - require.NoError(t, err) + type test struct { + path string + matches int + } - matches := g1.Match() - require.Len(t, matches, 6) - matches = g2.Match() - require.Len(t, matches, 2) - matches = g3.Match() - require.Len(t, matches, 1) - matches = g4.Match() - require.Len(t, matches, 1) - matches = g5.Match() - require.Len(t, matches, 0) + tests := []test{ + // test super asterisk + {path: filepath.Join(testdataDir, "**"), matches: 7}, + // test single asterisk + {path: filepath.Join(testdataDir, "*.log"), matches: 3}, + // test no meta characters (file exists) + {path: filepath.Join(testdataDir, "log1.log"), matches: 1}, + // test file that doesn't exist + {path: filepath.Join(testdataDir, "i_dont_exist.log"), matches: 0}, + // test super asterisk that doesn't exist + {path: filepath.Join(testdataDir, "dir_doesnt_exist", "**"), matches: 0}, + // test exclamation mark creates non-matching list with a range + {path: filepath.Join(testdataDir, "log[!1-2]*"), matches: 1}, + // test caret creates non-matching list + {path: filepath.Join(testdataDir, "log[^1-2]*"), matches: 1}, + // test exclamation mark creates non-matching list without a range + {path: filepath.Join(testdataDir, "log[!2]*"), matches: 2}, + // test escaped bracket is matched as a literal bracket before an exclamation mark + {path: filepath.Join(testdataDir, "log\\[!*"), matches: 1}, + // test escaped bracket is matched as a literal bracket before a caret (no such file) + {path: filepath.Join(testdataDir, "log\\[^*"), matches: 0}, + } + + for _, tc := range tests { + g, err := Compile(tc.path) + require.NoError(t, err) + matches := g.Match() + require.Len(t, matches, tc.matches) + } } func TestRootGlob(t *testing.T) { - dir := getTestdataDir() tests := []struct { input string output string }{ - {dir + "/**", dir + "/*"}, - {dir + "/nested?/**", dir + "/nested?/*"}, - {dir + "/ne**/nest*", dir + "/ne*"}, - {dir + "/nested?/*", ""}, + {filepath.Join(testdataDir, "**"), filepath.Join(testdataDir, "*")}, + {filepath.Join(testdataDir, "nested?", "**"), filepath.Join(testdataDir, "nested?", "*")}, + {filepath.Join(testdataDir, "ne**", "nest*"), filepath.Join(testdataDir, "ne*")}, + {filepath.Join(testdataDir, "nested?", "*"), ""}, } for _, test := range tests { @@ -57,26 +74,24 @@ } func 
TestFindNestedTextFile(t *testing.T) { - dir := getTestdataDir() // test super asterisk - g1, err := Compile(dir + "/**.txt") + g1, err := Compile(filepath.Join(testdataDir, "**.txt")) require.NoError(t, err) matches := g1.Match() require.Len(t, matches, 1) } -func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "globpath_test.go", "testdata", 1) -} - func TestMatch_ErrPermission(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping Unix only test") + } + tests := []struct { input string expected []string }{ - {"/root/foo", []string{"/root/foo"}}, + {"/root/foo", []string(nil)}, {"/root/f*", []string(nil)}, } @@ -98,3 +113,13 @@ func TestWindowsSeparator(t *testing.T) { ok := glob.MatchString("testdata\\nested1") require.True(t, ok) } + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") +} diff --git a/internal/globpath/testdata/log[!.log b/internal/globpath/testdata/log[!.log new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go index 23d8634c46520..65fcee418e388 100644 --- a/internal/goplugin/noplugin.go +++ b/internal/goplugin/noplugin.go @@ -1,9 +1,10 @@ +//go:build !goplugin // +build !goplugin package goplugin import "errors" -func LoadExternalPlugins(rootDir string) error { +func LoadExternalPlugins(_ string) error { return errors.New("go plugin support is not enabled") } diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go index 7e58ec32e92c2..3af051833b6a7 100644 --- a/internal/goplugin/plugin.go +++ b/internal/goplugin/plugin.go @@ -1,3 +1,4 @@ +//go:build goplugin // +build goplugin package goplugin diff --git a/internal/http.go b/internal/http.go index 1c3dd49577557..12adfe729df34 100644 --- a/internal/http.go +++ b/internal/http.go @@ -37,7 +37,6 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) if !ok || subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 { - rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"") h.onError(rw) http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) @@ -73,7 +72,6 @@ func (h *genericAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request // Scheme checking authorization := req.Header.Get("Authorization") if subtle.ConstantTimeCompare([]byte(authorization), []byte(h.credentials)) != 1 { - h.onError(rw) http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) return diff --git a/internal/internal.go b/internal/internal.go index 777128f667bf6..4441e9acfbf03 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -2,7 +2,6 @@ package internal import ( "bufio" - "bytes" "compress/gzip" "context" "errors" @@ -19,37 +18,19 @@ import ( "syscall" "time" "unicode" - - "github.com/alecthomas/units" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" var ( - TimeoutErr = errors.New("Command timed out.") - - NotImplementedError = errors.New("not implemented yet") - - VersionAlreadySetError = errors.New("version has already been set") + ErrTimeout = errors.New("command timed out") + ErrorNotImplemented = errors.New("not implemented yet") + ErrorVersionAlreadySet = errors.New("version has 
already been set") ) // Set via the main module var version string -// Duration just wraps time.Duration -type Duration struct { - Duration time.Duration -} - -// Size just wraps an int64 -type Size struct { - Size int64 -} - -type Number struct { - Value float64 -} - type ReadWaitCloser struct { pipeReader *io.PipeReader wg sync.WaitGroup @@ -58,7 +39,7 @@ type ReadWaitCloser struct { // SetVersion sets the telegraf agent version func SetVersion(v string) error { if version != "" { - return VersionAlreadySetError + return ErrorVersionAlreadySet } version = v return nil @@ -75,72 +56,6 @@ func ProductToken() string { Version(), strings.TrimPrefix(runtime.Version(), "go")) } -// UnmarshalTOML parses the duration from the TOML config file -func (d *Duration) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - // see if we can directly convert it - d.Duration, err = time.ParseDuration(string(b)) - if err == nil { - return nil - } - - // Parse string duration, ie, "1s" - if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 { - d.Duration, err = time.ParseDuration(uq) - if err == nil { - return nil - } - } - - // First try parsing as integer seconds - sI, err := strconv.ParseInt(string(b), 10, 64) - if err == nil { - d.Duration = time.Second * time.Duration(sI) - return nil - } - // Second try parsing as float seconds - sF, err := strconv.ParseFloat(string(b), 64) - if err == nil { - d.Duration = time.Second * time.Duration(sF) - return nil - } - - return nil -} - -func (s *Size) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - val, err := strconv.ParseInt(string(b), 10, 64) - if err == nil { - s.Size = val - return nil - } - uq, err := strconv.Unquote(string(b)) - if err != nil { - return err - } - val, err = units.ParseStrictBytes(uq) - if err != nil { - return err - } - s.Size = val - return nil -} - -func (n *Number) UnmarshalTOML(b []byte) error { - value, err := strconv.ParseFloat(string(b), 64) - if err != nil { - return err - } - - n.Value = value - return nil -} - // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). 
func ReadLines(filename string) ([]string, error) { @@ -382,8 +297,25 @@ func parseComponents(timestamp interface{}) (int64, int64, error) { return 0, 0, err } return integer, 0, nil + case int8: + return int64(ts), 0, nil + case int16: + return int64(ts), 0, nil + case int32: + return int64(ts), 0, nil case int64: return ts, 0, nil + case uint8: + return int64(ts), 0, nil + case uint16: + return int64(ts), 0, nil + case uint32: + return int64(ts), 0, nil + case uint64: + return int64(ts), 0, nil + case float32: + integer, fractional := math.Modf(float64(ts)) + return int64(integer), int64(fractional * 1e9), nil case float64: integer, fractional := math.Modf(ts) return int64(integer), int64(fractional * 1e9), nil @@ -417,6 +349,36 @@ func parseTime(format string, timestamp interface{}, location string) (time.Time if err != nil { return time.Unix(0, 0), err } + switch strings.ToLower(format) { + case "ansic": + format = time.ANSIC + case "unixdate": + format = time.UnixDate + case "rubydate": + format = time.RubyDate + case "rfc822": + format = time.RFC822 + case "rfc822z": + format = time.RFC822Z + case "rfc850": + format = time.RFC850 + case "rfc1123": + format = time.RFC1123 + case "rfc1123z": + format = time.RFC1123Z + case "rfc3339": + format = time.RFC3339 + case "rfc3339nano": + format = time.RFC3339Nano + case "stamp": + format = time.Stamp + case "stampmilli": + format = time.StampMilli + case "stampmicro": + format = time.StampMicro + case "stampnano": + format = time.StampNano + } return time.ParseInLocation(format, ts, loc) default: return time.Unix(0, 0), errors.New("unsupported type") diff --git a/internal/internal_test.go b/internal/internal_test.go index 25f0503ba20a8..24fdb91bb2ebc 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "crypto/rand" "io" - "io/ioutil" "log" "os/exec" "regexp" @@ -46,15 +45,14 @@ func TestSnakeCase(t *testing.T) { } var ( - sleepbin, _ = exec.LookPath("sleep") + sleepbin, _ = exec.LookPath("sleep") //nolint:unused // Used in skipped tests echobin, _ = exec.LookPath("echo") shell, _ = exec.LookPath("sh") ) func TestRunTimeout(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to random failures.") - } + t.Skip("Skipping test due to random failures & a data race when running test-all.") + if sleepbin == "" { t.Skip("'sleep' binary not available on OS, skipping.") } @@ -63,7 +61,7 @@ func TestRunTimeout(t *testing.T) { err := RunTimeout(cmd, time.Millisecond*20) elapsed := time.Since(start) - assert.Equal(t, TimeoutErr, err) + assert.Equal(t, ErrTimeout, err) // Verify that command gets killed in 20ms, with some breathing room assert.True(t, elapsed < time.Millisecond*75) } @@ -103,7 +101,7 @@ func TestCombinedOutputTimeout(t *testing.T) { _, err := CombinedOutputTimeout(cmd, time.Millisecond*20) elapsed := time.Since(start) - assert.Equal(t, TimeoutErr, err) + assert.Equal(t, ErrTimeout, err) // Verify that command gets killed in 20ms, with some breathing room assert.True(t, elapsed < time.Millisecond*75) } @@ -172,52 +170,6 @@ func TestRandomSleep(t *testing.T) { assert.True(t, elapsed < time.Millisecond*150) } -func TestDuration(t *testing.T) { - var d Duration - - d.UnmarshalTOML([]byte(`"1s"`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`1s`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`'1s'`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - 
d.UnmarshalTOML([]byte(`10`)) - assert.Equal(t, 10*time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`1.5`)) - assert.Equal(t, time.Second, d.Duration) -} - -func TestSize(t *testing.T) { - var s Size - - s.UnmarshalTOML([]byte(`"1B"`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`1`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`'1'`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`"1GB"`)) - assert.Equal(t, int64(1000*1000*1000), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`"12GiB"`)) - assert.Equal(t, int64(12*1024*1024*1024), s.Size) -} - func TestCompressWithGzip(t *testing.T) { testData := "the quick brown fox jumps over the lazy dog" inputBuffer := bytes.NewBuffer([]byte(testData)) @@ -229,7 +181,7 @@ func TestCompressWithGzip(t *testing.T) { assert.NoError(t, err) defer gzipReader.Close() - output, err := ioutil.ReadAll(gzipReader) + output, err := io.ReadAll(gzipReader) assert.NoError(t, err) assert.Equal(t, testData, string(output)) @@ -250,7 +202,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { rc, err := CompressWithGzip(mr) assert.NoError(t, err) - n, err := io.CopyN(ioutil.Discard, rc, 10000) + n, err := io.CopyN(io.Discard, rc, 10000) assert.NoError(t, err) assert.Equal(t, int64(10000), n) @@ -258,7 +210,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { err = rc.Close() assert.NoError(t, err) - n, err = io.CopyN(ioutil.Discard, rc, 10000) + n, err = io.CopyN(io.Discard, rc, 10000) assert.Error(t, io.EOF, err) assert.Equal(t, int64(0), n) @@ -274,7 +226,7 @@ func TestVersionAlreadySet(t *testing.T) { err = SetVersion("bar") assert.Error(t, err) - assert.IsType(t, VersionAlreadySetError, err) + assert.IsType(t, ErrorVersionAlreadySet, err) assert.Equal(t, "foo", Version()) } @@ -368,9 +320,84 @@ func TestAlignTime(t *testing.T) { func TestParseTimestamp(t *testing.T) { rfc3339 := func(value string) time.Time { tm, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - panic(err) - } + require.NoError(t, err) + return tm + } + ansic := func(value string) time.Time { + tm, err := time.Parse(time.ANSIC, value) + require.NoError(t, err) + return tm + } + + unixdate := func(value string) time.Time { + tm, err := time.Parse(time.UnixDate, value) + require.NoError(t, err) + return tm + } + + rubydate := func(value string) time.Time { + tm, err := time.Parse(time.RubyDate, value) + require.NoError(t, err) + return tm + } + + rfc822 := func(value string) time.Time { + tm, err := time.Parse(time.RFC822, value) + require.NoError(t, err) + return tm + } + + rfc822z := func(value string) time.Time { + tm, err := time.Parse(time.RFC822Z, value) + require.NoError(t, err) + return tm + } + + rfc850 := func(value string) time.Time { + tm, err := time.Parse(time.RFC850, value) + require.NoError(t, err) + return tm + } + + rfc1123 := func(value string) time.Time { + tm, err := time.Parse(time.RFC1123, value) + require.NoError(t, err) + return tm + } + + rfc1123z := func(value string) time.Time { + tm, err := time.Parse(time.RFC1123Z, value) + require.NoError(t, err) + return tm + } + + rfc3339nano := func(value string) time.Time { + tm, err := time.Parse(time.RFC3339Nano, value) + require.NoError(t, err) + return tm + } + + stamp := func(value string) time.Time { + tm, err := time.Parse(time.Stamp, value) + require.NoError(t, err) + return tm + } + + stampmilli := func(value string) time.Time { + tm, err := time.Parse(time.StampMilli, value) + 
require.NoError(t, err) + return tm + } + + stampmicro := func(value string) time.Time { + tm, err := time.Parse(time.StampMicro, value) + require.NoError(t, err) + return tm + } + + stampnano := func(value string) time.Time { + tm, err := time.Parse(time.StampNano, value) + require.NoError(t, err) return tm } @@ -468,6 +495,111 @@ func TestParseTimestamp(t *testing.T) { timestamp: "1568338208000000500", expected: rfc3339("2019-09-13T01:30:08.000000500Z"), }, + { + name: "rfc339 test", + format: "RFC3339", + timestamp: "2018-10-26T13:30:33Z", + expected: rfc3339("2018-10-26T13:30:33Z"), + }, + + { + name: "ANSIC", + format: "ANSIC", + timestamp: "Mon Jan 2 15:04:05 2006", + expected: ansic("Mon Jan 2 15:04:05 2006"), + }, + + { + name: "UnixDate", + format: "UnixDate", + timestamp: "Mon Jan 2 15:04:05 MST 2006", + expected: unixdate("Mon Jan 2 15:04:05 MST 2006"), + location: "Local", + }, + + { + name: "RubyDate", + format: "RubyDate", + timestamp: "Mon Jan 02 15:04:05 -0700 2006", + expected: rubydate("Mon Jan 02 15:04:05 -0700 2006"), + location: "Local", + }, + + { + name: "RFC822", + format: "RFC822", + timestamp: "02 Jan 06 15:04 MST", + expected: rfc822("02 Jan 06 15:04 MST"), + location: "Local", + }, + + { + name: "RFC822Z", + format: "RFC822Z", + timestamp: "02 Jan 06 15:04 -0700", + expected: rfc822z("02 Jan 06 15:04 -0700"), + location: "Local", + }, + + { + name: "RFC850", + format: "RFC850", + timestamp: "Monday, 02-Jan-06 15:04:05 MST", + expected: rfc850("Monday, 02-Jan-06 15:04:05 MST"), + location: "Local", + }, + + { + name: "RFC1123", + format: "RFC1123", + timestamp: "Mon, 02 Jan 2006 15:04:05 MST", + expected: rfc1123("Mon, 02 Jan 2006 15:04:05 MST"), + location: "Local", + }, + + { + name: "RFC1123Z", + format: "RFC1123Z", + timestamp: "Mon, 02 Jan 2006 15:04:05 -0700", + expected: rfc1123z("Mon, 02 Jan 2006 15:04:05 -0700"), + location: "Local", + }, + + { + name: "RFC3339Nano", + format: "RFC3339Nano", + timestamp: "2006-01-02T15:04:05.999999999-07:00", + expected: rfc3339nano("2006-01-02T15:04:05.999999999-07:00"), + location: "Local", + }, + + { + name: "Stamp", + format: "Stamp", + timestamp: "Jan 2 15:04:05", + expected: stamp("Jan 2 15:04:05"), + }, + + { + name: "StampMilli", + format: "StampMilli", + timestamp: "Jan 2 15:04:05.000", + expected: stampmilli("Jan 2 15:04:05.000"), + }, + + { + name: "StampMicro", + format: "StampMicro", + timestamp: "Jan 2 15:04:05.000000", + expected: stampmicro("Jan 2 15:04:05.000000"), + }, + + { + name: "StampNano", + format: "StampNano", + timestamp: "Jan 2 15:04:05.000000000", + expected: stampnano("Jan 2 15:04:05.000000000"), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/process/process.go b/internal/process/process.go index 3f88aac57b317..3bfc3bb7e44e6 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os/exec" "sync" "sync/atomic" @@ -126,12 +125,12 @@ func (p *Process) cmdLoop(ctx context.Context) error { } p.Log.Errorf("Process %s exited: %v", p.Cmd.Path, err) - p.Log.Infof("Restarting in %s...", time.Duration(p.RestartDelay)) + p.Log.Infof("Restarting in %s...", p.RestartDelay) select { case <-ctx.Done(): return nil - case <-time.After(time.Duration(p.RestartDelay)): + case <-time.After(p.RestartDelay): // Continue the loop and restart the process if err := p.cmdStart(); err != nil { return err @@ -187,5 +186,5 @@ func isQuitting(ctx context.Context) bool { } func defaultReadPipe(r 
io.Reader) { - io.Copy(ioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) } diff --git a/internal/process/process_posix.go b/internal/process/process_posix.go index 7b42b7da13214..8f736bc673592 100644 --- a/internal/process/process_posix.go +++ b/internal/process/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process diff --git a/internal/process/process_test.go b/internal/process/process_test.go index 7a7c8c6f33fd6..228f2f1e1b28d 100644 --- a/internal/process/process_test.go +++ b/internal/process/process_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process @@ -67,7 +68,7 @@ func TestMain(m *testing.M) { // externalProcess is an external "misbehaving" process that won't exit // cleanly. func externalProcess() { - wait := make(chan int, 0) + wait := make(chan int) fmt.Fprintln(os.Stdout, "started") <-wait os.Exit(2) diff --git a/internal/process/process_windows.go b/internal/process/process_windows.go index 0995d52469b07..3aefd20f4aa9c 100644 --- a/internal/process/process_windows.go +++ b/internal/process/process_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package process diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index a167b7cb78f7e..7cfde02692cd4 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -123,10 +123,7 @@ func (w *FileWriter) openCurrent() (err error) { w.bytesWritten = fileInfo.Size() } - if err = w.rotateIfNeeded(); err != nil { - return err - } - return nil + return w.rotateIfNeeded() } func (w *FileWriter) rotateIfNeeded() error { @@ -153,11 +150,7 @@ func (w *FileWriter) rotate() (err error) { return err } - if err = w.purgeArchivesIfNeeded(); err != nil { - return err - } - - return nil + return w.purgeArchivesIfNeeded() } func (w *FileWriter) purgeArchivesIfNeeded() (err error) { diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go index ca29b9a2f45d6..2d249d74548e1 100644 --- a/internal/rotate/file_writer_test.go +++ b/internal/rotate/file_writer_test.go @@ -1,7 +1,6 @@ package rotate import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +11,7 @@ import ( ) func TestFileWriter_NoRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationNo") + tempDir, err := os.MkdirTemp("", "RotationNo") require.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) require.NoError(t, err) @@ -22,12 +21,12 @@ func TestFileWriter_NoRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) } func TestFileWriter_TimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) @@ -39,28 +38,28 @@ func TestFileWriter_TimeRotation(t *testing.T) { time.Sleep(1 * time.Second) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenTimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") filePath := 
filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) time.Sleep(1 * time.Second) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) require.NoError(t, err) defer func() { writer.Close(); os.RemoveAll(tempDir) }() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_SizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(9) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) @@ -71,16 +70,16 @@ func TestFileWriter_SizeRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenSizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(12) filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) @@ -88,12 +87,12 @@ func TestFileWriter_ReopenSizeRotation(t *testing.T) { _, err = writer.Write([]byte("Hello World Again")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_DeleteArchives(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") + tempDir, err := os.MkdirTemp("", "RotationDeleteArchives") require.NoError(t, err) maxSize := int64(5) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) @@ -112,14 +111,14 @@ func TestFileWriter_DeleteArchives(t *testing.T) { _, err = writer.Write([]byte("Third file")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 3, len(files)) for _, tempFile := range files { var bytes []byte var err error path := filepath.Join(tempDir, tempFile.Name()) - if bytes, err = ioutil.ReadFile(path); err != nil { + if bytes, err = os.ReadFile(path); err != nil { t.Error(err.Error()) return } @@ -133,7 +132,7 @@ func TestFileWriter_DeleteArchives(t *testing.T) { } func TestFileWriter_CloseRotates(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationClose") + tempDir, err := os.MkdirTemp("", "RotationClose") require.NoError(t, err) defer os.RemoveAll(tempDir) maxSize := int64(9) @@ -142,7 +141,7 @@ func TestFileWriter_CloseRotates(t *testing.T) { writer.Close() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) } diff --git a/internal/snmp/config.go b/internal/snmp/config.go index e616e75709737..0a200b7067787 100644 --- a/internal/snmp/config.go +++ b/internal/snmp/config.go @@ -1,13 +1,13 @@ package snmp import ( - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) type ClientConfig struct { // Timeout to wait for a response. 
- Timeout internal.Duration `toml:"timeout"` - Retries int `toml:"retries"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"retries"` // Values: 1, 2, 3 Version uint8 `toml:"version"` @@ -15,7 +15,7 @@ type ClientConfig struct { Community string `toml:"community"` // Parameters for Version 2 & 3 - MaxRepetitions uint8 `toml:"max_repetitions"` + MaxRepetitions uint32 `toml:"max_repetitions"` // Parameters for Version 3 ContextName string `toml:"context_name"` diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 23a15594ed6f7..9220098e37f73 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -5,8 +5,9 @@ import ( "net/url" "strconv" "strings" + "time" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) // GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection. @@ -15,27 +16,27 @@ type GosnmpWrapper struct { } // Host returns the value of GoSNMP.Target. -func (gsw GosnmpWrapper) Host() string { - return gsw.Target +func (gs GosnmpWrapper) Host() string { + return gs.Target } // Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the // connection is using SNMPv1 or newer. // Also, if any error is encountered, it will just once reconnect and try again. -func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { +func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { var err error // On error, retry once. // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function. for i := 0; i < 2; i++ { - if gsw.Version == gosnmp.Version1 { - err = gsw.GoSNMP.Walk(oid, fn) + if gs.Version == gosnmp.Version1 { + err = gs.GoSNMP.Walk(oid, fn) } else { - err = gsw.GoSNMP.BulkWalk(oid, fn) + err = gs.GoSNMP.BulkWalk(oid, fn) } if err == nil { return nil } - if err := gsw.GoSNMP.Connect(); err != nil { + if err := gs.GoSNMP.Connect(); err != nil { return fmt.Errorf("reconnecting: %w", err) } } @@ -44,15 +45,15 @@ func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { // Get wraps GoSNMP.GET(). // If any error is encountered, it will just once reconnect and try again. 
-func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { +func (gs GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { var err error var pkt *gosnmp.SnmpPacket for i := 0; i < 2; i++ { - pkt, err = gsw.GoSNMP.Get(oids) + pkt, err = gs.GoSNMP.Get(oids) if err == nil { return pkt, nil } - if err := gsw.GoSNMP.Connect(); err != nil { + if err := gs.GoSNMP.Connect(); err != nil { return nil, fmt.Errorf("reconnecting: %w", err) } } @@ -62,7 +63,7 @@ func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { gs := GosnmpWrapper{&gosnmp.GoSNMP{}} - gs.Timeout = s.Timeout.Duration + gs.Timeout = time.Duration(s.Timeout) gs.Retries = s.Retries @@ -112,6 +113,14 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { sp.AuthenticationProtocol = gosnmp.MD5 case "sha": sp.AuthenticationProtocol = gosnmp.SHA + case "sha224": + sp.AuthenticationProtocol = gosnmp.SHA224 + case "sha256": + sp.AuthenticationProtocol = gosnmp.SHA256 + case "sha384": + sp.AuthenticationProtocol = gosnmp.SHA384 + case "sha512": + sp.AuthenticationProtocol = gosnmp.SHA512 case "": sp.AuthenticationProtocol = gosnmp.NoAuth default: @@ -125,6 +134,14 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { sp.PrivacyProtocol = gosnmp.DES case "aes": sp.PrivacyProtocol = gosnmp.AES + case "aes192": + sp.PrivacyProtocol = gosnmp.AES192 + case "aes192c": + sp.PrivacyProtocol = gosnmp.AES192C + case "aes256": + sp.PrivacyProtocol = gosnmp.AES256 + case "aes256c": + sp.PrivacyProtocol = gosnmp.AES256C case "": sp.PrivacyProtocol = gosnmp.NoPriv default: @@ -156,11 +173,14 @@ func (gs *GosnmpWrapper) SetAgent(agent string) error { return err } + // Only allow udp{4,6} and tcp{4,6}. + // Allowing ip{4,6} does not make sense as specifying a port + // requires the specification of a protocol. + // gosnmp does not handle these errors well, which is why + // they can result in cryptic errors by net.Dial. switch u.Scheme { - case "tcp": - gs.Transport = "tcp" - case "", "udp": - gs.Transport = "udp" + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + gs.Transport = u.Scheme default: return fmt.Errorf("unsupported scheme: %v", u.Scheme) } diff --git a/internal/templating/template.go b/internal/templating/template.go index 235d2f2a58928..09b78e19fce66 100644 --- a/internal/templating/template.go +++ b/internal/templating/template.go @@ -59,10 +59,8 @@ func (t *Template) Apply(line string, joiner string) (string, map[string]string, field = append(field, fields[i]) case "field*": field = append(field, fields[i:]...) - break case "measurement*": measurement = append(measurement, fields[i:]...) 
- break default: tags[tag] = append(tags[tag], fields[i]) } diff --git a/internal/type_conversions.go b/internal/type_conversions.go new file mode 100644 index 0000000000000..e2506a9068de3 --- /dev/null +++ b/internal/type_conversions.go @@ -0,0 +1,200 @@ +package internal + +import ( + "fmt" + "strconv" +) + +func ToString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return v, nil + case []byte: + return string(v), nil + case int: + return strconv.FormatInt(int64(v), 10), nil + case int8: + return strconv.FormatInt(int64(v), 10), nil + case int16: + return strconv.FormatInt(int64(v), 10), nil + case int32: + return strconv.FormatInt(int64(v), 10), nil + case int64: + return strconv.FormatInt(v, 10), nil + case uint: + return strconv.FormatUint(uint64(v), 10), nil + case uint8: + return strconv.FormatUint(uint64(v), 10), nil + case uint16: + return strconv.FormatUint(uint64(v), 10), nil + case uint32: + return strconv.FormatUint(uint64(v), 10), nil + case uint64: + return strconv.FormatUint(v, 10), nil + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(v, 'f', -1, 64), nil + case bool: + return strconv.FormatBool(v), nil + case fmt.Stringer: + return v.String(), nil + case nil: + return "", nil + } + return "", fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToFloat64(value interface{}) (float64, error) { + switch v := value.(type) { + case string: + return strconv.ParseFloat(v, 64) + case []byte: + return strconv.ParseFloat(string(v), 64) + case fmt.Stringer: + return strconv.ParseFloat(v.String(), 64) + case int: + return float64(v), nil + case int8: + return float64(v), nil + case int16: + return float64(v), nil + case int32: + return float64(v), nil + case int64: + return float64(v), nil + case uint: + return float64(v), nil + case uint8: + return float64(v), nil + case uint16: + return float64(v), nil + case uint32: + return float64(v), nil + case uint64: + return float64(v), nil + case float32: + return float64(v), nil + case float64: + return v, nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToInt64(value interface{}) (int64, error) { + switch v := value.(type) { + case string: + return strconv.ParseInt(v, 10, 64) + case []byte: + return strconv.ParseInt(string(v), 10, 64) + case fmt.Stringer: + return strconv.ParseInt(v.String(), 10, 64) + case int: + return int64(v), nil + case int8: + return int64(v), nil + case int16: + return int64(v), nil + case int32: + return int64(v), nil + case int64: + return v, nil + case uint: + return int64(v), nil + case uint8: + return int64(v), nil + case uint16: + return int64(v), nil + case uint32: + return int64(v), nil + case uint64: + return int64(v), nil + case float32: + return int64(v), nil + case float64: + return int64(v), nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToUint64(value interface{}) (uint64, error) { + switch v := value.(type) { + case string: + return strconv.ParseUint(v, 10, 64) + case []byte: + return strconv.ParseUint(string(v), 10, 64) + case fmt.Stringer: + return strconv.ParseUint(v.String(), 10, 64) + case int: + return uint64(v), nil + case int8: + return uint64(v), nil + case int16: + return uint64(v), nil + case int32: + return uint64(v), nil + case int64: + return uint64(v), nil + case uint: + return uint64(v), nil + case uint8: + return uint64(v), nil + case uint16: + return uint64(v), 
nil + case uint32: + return uint64(v), nil + case uint64: + return v, nil + case float32: + return uint64(v), nil + case float64: + return uint64(v), nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToBool(value interface{}) (bool, error) { + switch v := value.(type) { + case string: + return strconv.ParseBool(v) + case []byte: + return strconv.ParseBool(string(v)) + case fmt.Stringer: + return strconv.ParseBool(v.String()) + case int: + return v > 0, nil + case int8: + return v > 0, nil + case int16: + return v > 0, nil + case int32: + return v > 0, nil + case int64: + return v > 0, nil + case uint: + return v > 0, nil + case uint8: + return v > 0, nil + case uint16: + return v > 0, nil + case uint32: + return v > 0, nil + case uint64: + return v > 0, nil + case float32: + return v > 0, nil + case float64: + return v > 0, nil + case bool: + return v, nil + case nil: + return false, nil + } + return false, fmt.Errorf("type \"%T\" unsupported", value) +} diff --git a/internal/usage.go b/internal/usage.go index 6eff30e6b0b21..916b5cb86e908 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal @@ -16,6 +17,9 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + Monitoring is off by default. --plugin-directory directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced. diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 7fee6a1f1595c..9a1169851cd74 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal @@ -16,6 +17,9 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + Monitoring is off by default. --debug turn on debug logging --input-filter filter the inputs to enable, separator is : --input-list print available input plugins. 
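
The new internal/type_conversions.go above funnels scalar conversions through a single ToX(value interface{}) API. A minimal usage sketch follows; the values are arbitrary and only meant to show the error-reporting behavior:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Each helper accepts any supported scalar and returns an error for
	// everything else instead of panicking or silently zeroing.
	s, _ := internal.ToString(3.14)      // "3.14"
	i, _ := internal.ToInt64("42")       // 42
	f, _ := internal.ToFloat64(uint8(7)) // 7
	b, _ := internal.ToBool(1)           // true, numeric values > 0 are true

	fmt.Println(s, i, f, b)

	// Unsupported types surface as a descriptive error.
	if _, err := internal.ToInt64(struct{}{}); err != nil {
		fmt.Println(err) // type "struct {}" unsupported
	}
}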
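
The parseTime hunk further up resolves case-insensitive layout names ("rfc3339", "unixdate", and so on) to the reference layouts in the time package before calling time.ParseInLocation. Below is a standalone sketch of the same lookup; parseNamed is an illustrative name, not the Telegraf function itself:

package main

import (
	"fmt"
	"strings"
	"time"
)

// namedLayouts mirrors the switch added to parseTime: a case-insensitive
// layout name resolves to one of the reference layouts in the time package.
var namedLayouts = map[string]string{
	"ansic":       time.ANSIC,
	"unixdate":    time.UnixDate,
	"rubydate":    time.RubyDate,
	"rfc822":      time.RFC822,
	"rfc822z":     time.RFC822Z,
	"rfc850":      time.RFC850,
	"rfc1123":     time.RFC1123,
	"rfc1123z":    time.RFC1123Z,
	"rfc3339":     time.RFC3339,
	"rfc3339nano": time.RFC3339Nano,
	"stamp":       time.Stamp,
	"stampmilli":  time.StampMilli,
	"stampmicro":  time.StampMicro,
	"stampnano":   time.StampNano,
}

// parseNamed falls back to treating the format as a literal layout string
// when it is not one of the known names.
func parseNamed(format, value string, loc *time.Location) (time.Time, error) {
	if layout, ok := namedLayouts[strings.ToLower(format)]; ok {
		format = layout
	}
	return time.ParseInLocation(format, value, loc)
}

func main() {
	tm, err := parseNamed("RFC3339", "2018-10-26T13:30:33Z", time.UTC)
	fmt.Println(tm, err)
}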
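
For the internal/snmp changes above, the sketch below shows how a wrapper might now be built: Timeout is a config.Duration, MaxRepetitions is a uint32, and SetAgent accepts only udp{,4,6} and tcp{,4,6} schemes. Host, port, and community string are placeholders, and the exact set of ClientConfig fields needed in practice may differ:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/internal/snmp"
)

func main() {
	cfg := snmp.ClientConfig{
		Timeout:        config.Duration(5 * time.Second), // was internal.Duration
		Retries:        3,
		Version:        2,
		Community:      "public", // placeholder
		MaxRepetitions: 10,       // widened from uint8 to uint32
	}

	gs, err := snmp.NewWrapper(cfg)
	if err != nil {
		panic(err)
	}

	// ip4/ip6 schemes are rejected: without a transport protocol there is
	// no way to carry the port, and gosnmp turns that into cryptic
	// net.Dial errors.
	if err := gs.SetAgent("udp://192.0.2.1:161"); err != nil {
		panic(err)
	}
	fmt.Println(gs.Host())
}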
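
Later in this diff, metric.New loses its error return and SeriesGrouper gains AddMetric on top of a seeded maphash groupID. A small sketch of the merge behavior; the measurement, tags, and field values are invented:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	now := time.Now()
	tags := map[string]string{"host": "localhost"}

	// metric.New returns telegraf.Metric directly, the error return is gone.
	m1 := metric.New("cpu", tags, map[string]interface{}{"idle_time": 42}, now)
	m2 := metric.New("cpu", tags, map[string]interface{}{"usage_time": 42}, now)

	// Identical name, tag list, and timestamp hash to the same series ID,
	// so the grouper merges the fields instead of keeping two metrics.
	g := metric.NewSeriesGrouper()
	g.AddMetric(m1)
	g.AddMetric(m2)

	for _, m := range g.Metrics() {
		fmt.Println(m.Name(), m.Fields()) // cpu map[idle_time:42 usage_time:42]
	}
}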
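
The logger changes below swap internal.Duration/internal.Size for config.Duration/config.Size and add the LogWithTimezone option. A hedged configuration sketch; the path and rotation limits are examples only:

package main

import (
	"log"
	"time"

	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/logger"
)

func main() {
	logger.SetupLogging(logger.LogConfig{
		LogTarget:           logger.LogTargetFile,
		Logfile:             "/tmp/telegraf.log", // example path
		RotationInterval:    config.Duration(24 * time.Hour),
		RotationMaxSize:     config.Size(10 * 1024 * 1024),
		RotationMaxArchives: 5,
		// "local" (any casing) selects the host timezone; IANA names such
		// as "UTC" or "Europe/Berlin" are also accepted.
		LogWithTimezone: "local",
	})
	log.Printf("I! timestamps now use the configured timezone")
}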
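
And ahead of the next file: RegisterEventLogger now opens the Windows event log itself and can fail, so callers must check the returned error. A sketch under the assumption that registration happens before SetupLogging and that the "telegraf" event source already exists on the host:

//go:build windows
// +build windows

package main

import (
	"log"

	"github.com/influxdata/telegraf/logger"
)

func main() {
	// RegisterEventLogger opens the event log itself now and reports
	// failures instead of taking a ready-made service.Logger.
	if err := logger.RegisterEventLogger("telegraf"); err != nil {
		log.Fatalf("E! registering the event logger failed: %v", err)
	}
	logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
}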
diff --git a/logger/event_logger.go b/logger/event_logger.go index 48b645ddedb3c..bb0672de76c5c 100644 --- a/logger/event_logger.go +++ b/logger/event_logger.go @@ -1,35 +1,42 @@ +//go:build windows +// +build windows + package logger import ( "io" + "log" "strings" "github.com/influxdata/wlog" - "github.com/kardianos/service" + "golang.org/x/sys/windows/svc/eventlog" ) const ( LogTargetEventlog = "eventlog" + eidInfo = 1 + eidWarning = 2 + eidError = 3 ) type eventLogger struct { - logger service.Logger + logger *eventlog.Log } func (t *eventLogger) Write(b []byte) (n int, err error) { loc := prefixRegex.FindIndex(b) n = len(b) if loc == nil { - err = t.logger.Info(b) + err = t.logger.Info(1, string(b)) } else if n > 2 { //skip empty log messages line := strings.Trim(string(b[loc[1]:]), " \t\r\n") switch rune(b[loc[0]]) { case 'I': - err = t.logger.Info(line) + err = t.logger.Info(eidInfo, line) case 'W': - err = t.logger.Warning(line) + err = t.logger.Warning(eidWarning, line) case 'E': - err = t.logger.Error(line) + err = t.logger.Error(eidError, line) } } @@ -37,13 +44,20 @@ func (t *eventLogger) Write(b []byte) (n int, err error) { } type eventLoggerCreator struct { - serviceLogger service.Logger + logger *eventlog.Log } func (e *eventLoggerCreator) CreateLogger(config LogConfig) (io.Writer, error) { - return wlog.NewWriter(&eventLogger{logger: e.serviceLogger}), nil + return wlog.NewWriter(&eventLogger{logger: e.logger}), nil } -func RegisterEventLogger(serviceLogger service.Logger) { - registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: serviceLogger}) +func RegisterEventLogger(name string) error { + eventLog, err := eventlog.Open(name) + if err != nil { + log.Printf("E! An error occurred while initializing an event logger. %s", err) + return err + } + + registerLogger(LogTargetEventlog, &eventLoggerCreator{logger: eventLog}) + return nil } diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go index f2d4eb4209e89..d268252779867 100644 --- a/logger/event_logger_test.go +++ b/logger/event_logger_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package logger @@ -10,9 +11,9 @@ import ( "testing" "time" - "github.com/kardianos/service" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/svc/eventlog" ) type Levels int @@ -30,7 +31,8 @@ type Event struct { func getEventLog(t *testing.T, since time.Time) []Event { timeStr := since.UTC().Format(time.RFC3339) - cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='Telegraf']]]") + timeStr = timeStr[:19] + cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='telegraf']]]") var out bytes.Buffer cmd.Stdout = &out err := cmd.Run() @@ -44,7 +46,7 @@ func getEventLog(t *testing.T, since time.Time) []Event { return events.Events } -func TestEventLog(t *testing.T) { +func TestEventLogIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -67,7 +69,7 @@ func TestEventLog(t *testing.T) { assert.Contains(t, events, Event{Message: "Err message", Level: Error}) } -func TestRestrictedEventLog(t *testing.T) { +func TestRestrictedEventLogIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -91,10 +93,8 @@ func TestRestrictedEventLog(t *testing.T) { } func 
prepareLogger(t *testing.T) { - svc, err := service.New(nil, &service.Config{Name: "Telegraf"}) + eventLog, err := eventlog.Open("telegraf") require.NoError(t, err) - svcLogger, err := svc.SystemLogger(nil) - require.NoError(t, err) - require.NotNil(t, svcLogger) - registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: svcLogger}) + require.NotNil(t, eventLog) + registerLogger(LogTargetEventlog, &eventLoggerCreator{logger: eventLog}) } diff --git a/logger/logger.go b/logger/logger.go index a276d2e807c6c..27e3c79f1fa06 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -6,9 +6,10 @@ import ( "log" "os" "regexp" + "strings" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/wlog" ) @@ -33,11 +34,13 @@ type LogConfig struct { // logger will fallback to stderr Logfile string // will rotate when current file at the specified time interval - RotationInterval internal.Duration + RotationInterval config.Duration // will rotate when current file size exceeds this parameter. - RotationMaxSize internal.Size + RotationMaxSize config.Size // maximum rotated files to keep (older ones will be deleted) RotationMaxArchives int + // pick a timezone to use when logging. or type 'local' for local time. + LogWithTimezone string } type LoggerCreator interface { @@ -56,21 +59,24 @@ func registerLogger(name string, loggerCreator LoggerCreator) { type telegrafLog struct { writer io.Writer internalWriter io.Writer + timezone *time.Location } func (t *telegrafLog) Write(b []byte) (n int, err error) { var line []byte + timeToPrint := time.Now().In(t.timezone) + if !prefixRegex.Match(b) { - line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...) + line = append([]byte(timeToPrint.Format(time.RFC3339)+" I! "), b...) } else { - line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...) + line = append([]byte(timeToPrint.Format(time.RFC3339)+" "), b...) } + return t.writer.Write(line) } func (t *telegrafLog) Close() error { - var stdErrWriter io.Writer - stdErrWriter = os.Stderr + stdErrWriter := os.Stderr // avoid closing stderr if t.internalWriter != stdErrWriter { closer, isCloser := t.internalWriter.(io.Closer) @@ -83,11 +89,23 @@ func (t *telegrafLog) Close() error { } // newTelegrafWriter returns a logging-wrapped writer. -func newTelegrafWriter(w io.Writer) io.Writer { +func newTelegrafWriter(w io.Writer, c LogConfig) (io.Writer, error) { + timezoneName := c.LogWithTimezone + + if strings.ToLower(timezoneName) == "local" { + timezoneName = "Local" + } + + tz, err := time.LoadLocation(timezoneName) + if err != nil { + return nil, errors.New("error while setting logging timezone: " + err.Error()) + } + return &telegrafLog{ writer: wlog.NewWriter(w), internalWriter: w, - } + timezone: tz, + }, nil } // SetupLogging configures the logging output. @@ -106,7 +124,7 @@ func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { case LogTargetFile: if config.Logfile != "" { var err error - if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil { + if writer, err = rotate.NewFileWriter(config.Logfile, time.Duration(config.RotationInterval), int64(config.RotationMaxSize), config.RotationMaxArchives); err != nil { log.Printf("E! 
Unable to open %s (%s), using stderr", config.Logfile, err) writer = defaultWriter } @@ -120,7 +138,7 @@ func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { writer = defaultWriter } - return newTelegrafWriter(writer), nil + return newTelegrafWriter(writer, config) } // Keep track what is actually set as a log output, because log package doesn't provide a getter. diff --git a/logger/logger_test.go b/logger/logger_test.go index a5f53ca17e89b..47af1d4591bff 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -3,19 +3,18 @@ package logger import ( "bytes" "io" - "io/ioutil" "log" "os" "path/filepath" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() @@ -24,13 +23,13 @@ func TestWriteLogToFile(t *testing.T) { log.Printf("I! TEST") log.Printf("D! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestDebugWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -38,13 +37,13 @@ func TestDebugWriteLogToFile(t *testing.T) { SetupLogging(config) log.Printf("D! TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z D! TEST\n")) } func TestErrorWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -53,13 +52,13 @@ func TestErrorWriteLogToFile(t *testing.T) { log.Printf("E! TEST") log.Printf("I! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z E! TEST\n")) } func TestAddDefaultLogLevel(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -67,13 +66,13 @@ func TestAddDefaultLogLevel(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestWriteToTruncatedFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -81,7 +80,7 @@ func TestWriteToTruncatedFile(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! 
TEST\n")) @@ -91,18 +90,18 @@ func TestWriteToTruncatedFile(t *testing.T) { log.Printf("SHOULD BE FIRST") - f, err = ioutil.ReadFile(tmpfile.Name()) + f, err = os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n")) } func TestWriteToFileInRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "LogRotation") + tempDir, err := os.MkdirTemp("", "LogRotation") require.NoError(t, err) - config := createBasicLogConfig(filepath.Join(tempDir, "test.log")) - config.LogTarget = LogTargetFile - config.RotationMaxSize = internal.Size{Size: int64(30)} - writer := newLogWriter(config) + cfg := createBasicLogConfig(filepath.Join(tempDir, "test.log")) + cfg.LogTarget = LogTargetFile + cfg.RotationMaxSize = config.Size(30) + writer := newLogWriter(cfg) // Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use. closer, isCloser := writer.(io.Closer) assert.True(t, isCloser) @@ -110,7 +109,7 @@ func TestWriteToFileInRotation(t *testing.T) { log.Printf("I! TEST 1") // Writes 31 bytes, will rotate log.Printf("I! TEST") // Writes 29 byes, no rotation expected - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } @@ -137,7 +136,10 @@ func TestLogTargetSettings(t *testing.T) { func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer - w := newTelegrafWriter(&buf) + w, err := newTelegrafWriter(&buf, LogConfig{}) + if err != nil { + panic("Unable to create log writer.") + } for i := 0; i < b.N; i++ { buf.Reset() w.Write(msg) diff --git a/metric.go b/metric.go index 6c7b1c6c5f75c..23098bb8bc71e 100644 --- a/metric.go +++ b/metric.go @@ -57,9 +57,7 @@ type Metric interface { Time() time.Time // Type returns a general type for the entire metric that describes how you - // might interpret, aggregate the values. - // - // This method may be removed in the future and its use is discouraged. + // might interpret, aggregate the values. Used by prometheus and statsd. Type() ValueType // SetName sets the metric name. @@ -122,14 +120,4 @@ type Metric interface { // Drop marks the metric as processed successfully without being written // to any output. Drop() - - // SetAggregate indicates the metric is an aggregated value. - // - // This method may be removed in the future and its use is discouraged. - SetAggregate(bool) - - // IsAggregate returns true if the Metric is an aggregate. - // - // This method may be removed in the future and its use is discouraged. - IsAggregate() bool } diff --git a/metric/metric.go b/metric/metric.go index 517645a831280..f8483459a93bf 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -15,8 +15,7 @@ type metric struct { fields []*telegraf.Field tm time.Time - tp telegraf.ValueType - aggregate bool + tp telegraf.ValueType } func New( @@ -25,7 +24,7 @@ func New( fields map[string]interface{}, tm time.Time, tp ...telegraf.ValueType, -) (telegraf.Metric, error) { +) telegraf.Metric { var vtype telegraf.ValueType if len(tp) > 0 { vtype = tp[0] @@ -61,19 +60,18 @@ func New( } } - return m, nil + return m } // FromMetric returns a deep copy of the metric with any tracking information // removed. 
func FromMetric(other telegraf.Metric) telegraf.Metric { m := &metric{ - name: other.Name(), - tags: make([]*telegraf.Tag, len(other.TagList())), - fields: make([]*telegraf.Field, len(other.FieldList())), - tm: other.Time(), - tp: other.Type(), - aggregate: other.IsAggregate(), + name: other.Name(), + tags: make([]*telegraf.Tag, len(other.TagList())), + fields: make([]*telegraf.Field, len(other.FieldList())), + tm: other.Time(), + tp: other.Type(), } for i, tag := range other.TagList() { @@ -233,12 +231,11 @@ func (m *metric) SetTime(t time.Time) { func (m *metric) Copy() telegraf.Metric { m2 := &metric{ - name: m.name, - tags: make([]*telegraf.Tag, len(m.tags)), - fields: make([]*telegraf.Field, len(m.fields)), - tm: m.tm, - tp: m.tp, - aggregate: m.aggregate, + name: m.name, + tags: make([]*telegraf.Tag, len(m.tags)), + fields: make([]*telegraf.Field, len(m.fields)), + tm: m.tm, + tp: m.tp, } for i, tag := range m.tags { @@ -251,14 +248,6 @@ func (m *metric) Copy() telegraf.Metric { return m2 } -func (m *metric) SetAggregate(b bool) { - m.aggregate = true -} - -func (m *metric) IsAggregate() bool { - return m.aggregate -} - func (m *metric) HashID() uint64 { h := fnv.New64a() h.Write([]byte(m.name)) @@ -297,7 +286,7 @@ func convertField(v interface{}) interface{} { case uint: return uint64(v) case uint64: - return uint64(v) + return v case []byte: return string(v) case int32: @@ -340,7 +329,7 @@ func convertField(v interface{}) interface{} { } case *uint64: if v != nil { - return uint64(*v) + return *v } case *[]byte: if v != nil { diff --git a/metric/metric_test.go b/metric/metric_test.go index 7033d32303f16..d4d1cb11bb8ed 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -20,8 +20,7 @@ func TestNewMetric(t *testing.T) { "usage_idle": float64(99), "usage_busy": float64(1), } - m, err := New("cpu", tags, fields, now) - require.NoError(t, err) + m := New("cpu", tags, fields, now) require.Equal(t, "cpu", m.Name()) require.Equal(t, tags, m.Tags()) @@ -38,10 +37,7 @@ func baseMetric() telegraf.Metric { } now := time.Now() - m, err := New("cpu", tags, fields, now) - if err != nil { - panic(err) - } + m := New("cpu", tags, fields, now) return m } @@ -176,7 +172,7 @@ func TestTagList_Sorted(t *testing.T) { func TestEquals(t *testing.T) { now := time.Now() - m1, err := New("cpu", + m1 := New("cpu", map[string]string{ "host": "localhost", }, @@ -185,9 +181,8 @@ func TestEquals(t *testing.T) { }, now, ) - require.NoError(t, err) - m2, err := New("cpu", + m2 := New("cpu", map[string]string{ "host": "localhost", }, @@ -196,7 +191,6 @@ func TestEquals(t *testing.T) { }, now, ) - require.NoError(t, err) lhs := m1.(*metric) require.Equal(t, lhs, m2) @@ -208,7 +202,7 @@ func TestEquals(t *testing.T) { } func TestHashID(t *testing.T) { - m, _ := New( + m := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -241,7 +235,7 @@ func TestHashID(t *testing.T) { } func TestHashID_Consistency(t *testing.T) { - m, _ := New( + m := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -255,7 +249,7 @@ func TestHashID_Consistency(t *testing.T) { ) hash := m.HashID() - m2, _ := New( + m2 := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -274,7 +268,7 @@ func TestHashID_Consistency(t *testing.T) { } func TestHashID_Delimiting(t *testing.T) { - m1, _ := New( + m1 := New( "cpu", map[string]string{ "a": "x", @@ -286,7 +280,7 @@ func TestHashID_Delimiting(t *testing.T) { }, time.Now(), ) - m2, _ := New( + m2 := New( "cpu", map[string]string{ "a": "xbycz", @@ -328,15 +322,7 
@@ func TestValueType(t *testing.T) { fields := map[string]interface{}{ "value": float64(42), } - m, err := New("cpu", tags, fields, now, telegraf.Gauge) - assert.NoError(t, err) + m := New("cpu", tags, fields, now, telegraf.Gauge) assert.Equal(t, telegraf.Gauge, m.Type()) } - -func TestCopyAggregate(t *testing.T) { - m1 := baseMetric() - m1.SetAggregate(true) - m2 := m1.Copy() - assert.True(t, m2.IsAggregate()) -} diff --git a/metric/series_grouper.go b/metric/series_grouper.go index 5dc66e11b8e00..03f110abcb429 100644 --- a/metric/series_grouper.go +++ b/metric/series_grouper.go @@ -1,10 +1,9 @@ package metric import ( - "hash/fnv" - "io" + "encoding/binary" + "hash/maphash" "sort" - "strconv" "time" "github.com/influxdata/telegraf" @@ -23,14 +22,17 @@ import ( // + cpu,host=localhost idle_time=42,usage_time=42 func NewSeriesGrouper() *SeriesGrouper { return &SeriesGrouper{ - metrics: make(map[uint64]telegraf.Metric), - ordered: []telegraf.Metric{}, + metrics: make(map[uint64]telegraf.Metric), + ordered: []telegraf.Metric{}, + hashSeed: maphash.MakeSeed(), } } type SeriesGrouper struct { metrics map[uint64]telegraf.Metric ordered []telegraf.Metric + + hashSeed maphash.Seed } // Add adds a field key and value to the series. @@ -41,46 +43,65 @@ func (g *SeriesGrouper) Add( field string, fieldValue interface{}, ) error { - var err error - id := groupID(measurement, tags, tm) - metric := g.metrics[id] - if metric == nil { - metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) - if err != nil { - return err - } - g.metrics[id] = metric - g.ordered = append(g.ordered, metric) + taglist := make([]*telegraf.Tag, 0, len(tags)) + for k, v := range tags { + taglist = append(taglist, + &telegraf.Tag{Key: k, Value: v}) + } + sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) + + id := groupID(g.hashSeed, measurement, taglist, tm) + m := g.metrics[id] + if m == nil { + m = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) + g.metrics[id] = m + g.ordered = append(g.ordered, m) } else { - metric.AddField(field, fieldValue) + m.AddField(field, fieldValue) } return nil } +// AddMetric adds a metric to the series, merging with any previous matching metrics. +func (g *SeriesGrouper) AddMetric( + metric telegraf.Metric, +) { + id := groupID(g.hashSeed, metric.Name(), metric.TagList(), metric.Time()) + m := g.metrics[id] + if m == nil { + m = metric.Copy() + g.metrics[id] = m + g.ordered = append(g.ordered, m) + } else { + for _, f := range metric.FieldList() { + m.AddField(f.Key, f.Value) + } + } +} + // Metrics returns the metrics grouped by series and time. 
func (g *SeriesGrouper) Metrics() []telegraf.Metric { return g.ordered } -func groupID(measurement string, tags map[string]string, tm time.Time) uint64 { - h := fnv.New64a() - h.Write([]byte(measurement)) - h.Write([]byte("\n")) +func groupID(seed maphash.Seed, measurement string, taglist []*telegraf.Tag, tm time.Time) uint64 { + var mh maphash.Hash + mh.SetSeed(seed) + + mh.WriteString(measurement) + mh.WriteByte(0) - taglist := make([]*telegraf.Tag, 0, len(tags)) - for k, v := range tags { - taglist = append(taglist, - &telegraf.Tag{Key: k, Value: v}) - } - sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) for _, tag := range taglist { - h.Write([]byte(tag.Key)) - h.Write([]byte("\n")) - h.Write([]byte(tag.Value)) - h.Write([]byte("\n")) + mh.WriteString(tag.Key) + mh.WriteByte(0) + mh.WriteString(tag.Value) + mh.WriteByte(0) } - h.Write([]byte("\n")) + mh.WriteByte(0) + + var tsBuf [8]byte + binary.BigEndian.PutUint64(tsBuf[:], uint64(tm.UnixNano())) + mh.Write(tsBuf[:]) - io.WriteString(h, strconv.FormatInt(tm.UnixNano(), 10)) - return h.Sum64() + return mh.Sum64() } diff --git a/metric/series_grouper_test.go b/metric/series_grouper_test.go new file mode 100644 index 0000000000000..eee338a41d130 --- /dev/null +++ b/metric/series_grouper_test.go @@ -0,0 +1,37 @@ +package metric + +import ( + "hash/maphash" + "testing" + "time" +) + +var m = New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f1": 1, + "f2": 2, + "f3": 3, + "f4": 4, + "f5": 5, + "f6": 6, + "f7": 7, + "f8": 8, + }, + time.Now(), +) + +var result uint64 + +var hashSeed = maphash.MakeSeed() + +func BenchmarkGroupID(b *testing.B) { + for n := 0; n < b.N; n++ { + result = groupID(hashSeed, m.Name(), m.TagList(), m.Time()) + } +} diff --git a/metric/tracking.go b/metric/tracking.go index e370d9f2a7ccc..e0bf5ff8e6596 100644 --- a/metric/tracking.go +++ b/metric/tracking.go @@ -117,7 +117,6 @@ func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf. d: d, } group[i] = dm - } if finalizer != nil { runtime.SetFinalizer(d, finalizer) diff --git a/metric/tracking_test.go b/metric/tracking_test.go index 0ca1ca4daa4bc..4d89a32c18623 100644 --- a/metric/tracking_test.go +++ b/metric/tracking_test.go @@ -16,10 +16,7 @@ func mustMetric( tm time.Time, tp ...telegraf.ValueType, ) telegraf.Metric { - m, err := New(name, tags, fields, tm, tp...) - if err != nil { - panic("mustMetric") - } + m := New(name, tags, fields, tm, tp...) 
return m } @@ -78,12 +75,13 @@ func TestTracking(t *testing.T) { { name: "accept", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m.Accept() @@ -93,12 +91,13 @@ func TestTracking(t *testing.T) { { name: "reject", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m.Reject() @@ -108,12 +107,13 @@ func TestTracking(t *testing.T) { { name: "accept copy", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() @@ -125,12 +125,13 @@ func TestTracking(t *testing.T) { { name: "copy with accept and done", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() @@ -142,12 +143,13 @@ func TestTracking(t *testing.T) { { name: "copy with mixed delivery", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() diff --git a/models/buffer.go b/models/buffer.go index 9cc1a3d889f38..5f721dc98081b 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -220,17 +220,6 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { b.BufferSize.Set(int64(b.length())) } -// dist returns the distance between two indexes. Because this data structure -// uses a half open range the arguments must both either left side or right -// side pairs. -func (b *Buffer) dist(begin, end int) int { - if begin <= end { - return end - begin - } else { - return b.cap - begin + end - } -} - // next returns the next index with wrapping. func (b *Buffer) next(index int) int { index++ @@ -247,15 +236,6 @@ func (b *Buffer) nextby(index, count int) int { return index } -// next returns the prev index with wrapping. -func (b *Buffer) prev(index int) int { - index-- - if index < 0 { - return b.cap - 1 - } - return index -} - // prevby returns the index that is count older with wrapping. 
func (b *Buffer) prevby(index, count int) int { index -= count diff --git a/models/buffer_test.go b/models/buffer_test.go index 9aef94fb86585..d830ac91c6dd9 100644 --- a/models/buffer_test.go +++ b/models/buffer_test.go @@ -34,7 +34,7 @@ func Metric() telegraf.Metric { } func MetricTime(sec int64) telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -42,9 +42,6 @@ func MetricTime(sec int64) telegraf.Metric { }, time.Unix(sec, 0), ) - if err != nil { - panic(err) - } return m } diff --git a/models/filter.go b/models/filter.go index 13627daad3434..8103c23173297 100644 --- a/models/filter.go +++ b/models/filter.go @@ -54,41 +54,41 @@ func (f *Filter) Compile() error { var err error f.nameDrop, err = filter.Compile(f.NameDrop) if err != nil { - return fmt.Errorf("Error compiling 'namedrop', %s", err) + return fmt.Errorf("error compiling 'namedrop', %s", err) } f.namePass, err = filter.Compile(f.NamePass) if err != nil { - return fmt.Errorf("Error compiling 'namepass', %s", err) + return fmt.Errorf("error compiling 'namepass', %s", err) } f.fieldDrop, err = filter.Compile(f.FieldDrop) if err != nil { - return fmt.Errorf("Error compiling 'fielddrop', %s", err) + return fmt.Errorf("error compiling 'fielddrop', %s", err) } f.fieldPass, err = filter.Compile(f.FieldPass) if err != nil { - return fmt.Errorf("Error compiling 'fieldpass', %s", err) + return fmt.Errorf("error compiling 'fieldpass', %s", err) } f.tagExclude, err = filter.Compile(f.TagExclude) if err != nil { - return fmt.Errorf("Error compiling 'tagexclude', %s", err) + return fmt.Errorf("error compiling 'tagexclude', %s", err) } f.tagInclude, err = filter.Compile(f.TagInclude) if err != nil { - return fmt.Errorf("Error compiling 'taginclude', %s", err) + return fmt.Errorf("error compiling 'taginclude', %s", err) } for i := range f.TagDrop { f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter) if err != nil { - return fmt.Errorf("Error compiling 'tagdrop', %s", err) + return fmt.Errorf("error compiling 'tagdrop', %s", err) } } for i := range f.TagPass { f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter) if err != nil { - return fmt.Errorf("Error compiling 'tagpass', %s", err) + return fmt.Errorf("error compiling 'tagpass', %s", err) } } return nil @@ -132,17 +132,11 @@ func (f *Filter) IsActive() bool { // based on the drop/pass filter parameters func (f *Filter) shouldNamePass(key string) bool { pass := func(f *Filter) bool { - if f.namePass.Match(key) { - return true - } - return false + return f.namePass.Match(key) } drop := func(f *Filter) bool { - if f.nameDrop.Match(key) { - return false - } - return true + return !f.nameDrop.Match(key) } if f.namePass != nil && f.nameDrop != nil { diff --git a/models/filter_test.go b/models/filter_test.go index d241244b9d704..aa32e095163c4 100644 --- a/models/filter_test.go +++ b/models/filter_test.go @@ -15,11 +15,10 @@ func TestFilter_ApplyEmpty(t *testing.T) { require.NoError(t, f.Compile()) require.False(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) } @@ -37,11 +36,10 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{"cpu": "cpu-total"}, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) 
require.False(t, f.Select(m)) } @@ -53,14 +51,13 @@ func TestFilter_ApplyDeleteFields(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{ "value": int64(1), "value2": int64(2), }, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) f.Modify(m) require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields()) @@ -74,14 +71,13 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{ "value": int64(1), "value2": int64(2), }, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) f.Modify(m) require.Len(t, m.FieldList(), 0) @@ -332,14 +328,13 @@ func TestFilter_TagDrop(t *testing.T) { } func TestFilter_FilterTagsNoMatches(t *testing.T) { - m, err := metric.New("m", + m := metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f := Filter{ TagExclude: []string{"nomatch"}, } @@ -361,14 +356,13 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) { } func TestFilter_FilterTagsMatches(t *testing.T) { - m, err := metric.New("m", + m := metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f := Filter{ TagExclude: []string{"ho*"}, } @@ -379,14 +373,13 @@ func TestFilter_FilterTagsMatches(t *testing.T) { "mytag": "foobar", }, m.Tags()) - m, err = metric.New("m", + m = metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f = Filter{ TagInclude: []string{"my*"}, } @@ -402,7 +395,6 @@ func TestFilter_FilterTagsMatches(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterNamePassAndDrop(t *testing.T) { - inputData := []string{"name1", "name2", "name3", "name4"} expectedResult := []bool{false, true, false, false} @@ -422,7 +414,6 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterFieldPassAndDrop(t *testing.T) { - inputData := []string{"field1", "field2", "field3", "field4"} expectedResult := []bool{false, true, false, false} @@ -479,7 +470,6 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) { for i, tag := range inputData { require.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) } - } func BenchmarkFilter(b *testing.B) { diff --git a/models/log.go b/models/log.go index c0b52a812d924..063a43d6ebeac 100644 --- a/models/log.go +++ b/models/log.go @@ -100,6 +100,4 @@ func SetLoggerOnPlugin(i interface{}, log telegraf.Logger) { log.Debugf("Plugin %q defines a 'Log' field on its struct of an unexpected type %q. 
Expected telegraf.Logger", valI.Type().Name(), field.Type().String()) } - - return } diff --git a/models/running_aggregator.go b/models/running_aggregator.go index cbfb9889b87e5..5aa3979c36926 100644 --- a/models/running_aggregator.go +++ b/models/running_aggregator.go @@ -117,10 +117,6 @@ func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { r.Config.Tags, nil) - if m != nil { - m.SetAggregate(true) - } - r.MetricsPushed.Incr(1) return m diff --git a/models/running_input_test.go b/models/running_input_test.go index ff3747116f6ca..8f9390f53b730 100644 --- a/models/running_input_test.go +++ b/models/running_input_test.go @@ -23,17 +23,16 @@ func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { require.NoError(t, ri.Config.Filter.Compile()) ri.SetDefaultTags(map[string]string{"a": "x", "b": "y"}) - m, err := metric.New("cpu", + m := metric.New("cpu", map[string]string{}, map[string]interface{}{ "value": 42, }, now) - require.NoError(t, err) actual := ri.MakeMetric(m) - expected, err := metric.New("cpu", + expected := metric.New("cpu", map[string]string{ "b": "y", }, @@ -41,7 +40,6 @@ func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { "value": 42, }, now) - require.NoError(t, err) testutil.RequireMetricEqual(t, expected, actual) } @@ -52,13 +50,12 @@ func TestMakeMetricNoFields(t *testing.T) { Name: "TestRunningInput", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{}, now, telegraf.Untyped) m = ri.MakeMetric(m) - require.NoError(t, err) assert.Nil(t, m) } @@ -69,7 +66,7 @@ func TestMakeMetricNilFields(t *testing.T) { Name: "TestRunningInput", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), @@ -77,17 +74,15 @@ func TestMakeMetricNilFields(t *testing.T) { }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int(101), }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -110,7 +105,7 @@ func TestMakeMetricWithPluginTags(t *testing.T) { telegraf.Untyped) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{ "foo": "bar", }, @@ -119,7 +114,6 @@ func TestMakeMetricWithPluginTags(t *testing.T) { }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -135,7 +129,7 @@ func TestMakeMetricFilteredOut(t *testing.T) { assert.NoError(t, ri.Config.Filter.Compile()) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), @@ -143,7 +137,6 @@ func TestMakeMetricFilteredOut(t *testing.T) { now, telegraf.Untyped) m = ri.MakeMetric(m) - require.NoError(t, err) assert.Nil(t, m) } @@ -164,7 +157,7 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { now, telegraf.Untyped) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{ "foo": "bar", }, @@ -173,7 +166,6 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -184,23 +176,21 @@ func TestMakeMetricNameOverride(t *testing.T) { NameOverride: "foobar", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = 
ri.MakeMetric(m) - expected, err := metric.New("foobar", + expected := metric.New("foobar", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -211,23 +201,21 @@ func TestMakeMetricNamePrefix(t *testing.T) { MeasurementPrefix: "foobar_", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("foobar_RITest", + expected := metric.New("foobar_RITest", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -238,23 +226,21 @@ func TestMakeMetricNameSuffix(t *testing.T) { MeasurementSuffix: "_foobar", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("RITest_foobar", + expected := metric.New("RITest_foobar", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -289,6 +275,6 @@ func TestMetricErrorCounters(t *testing.T) { type testInput struct{} -func (t *testInput) Description() string { return "" } -func (t *testInput) SampleConfig() string { return "" } -func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil } +func (t *testInput) Description() string { return "" } +func (t *testInput) SampleConfig() string { return "" } +func (t *testInput) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/models/running_output.go b/models/running_output.go index 894ae011c986d..6f5f8c0a84bad 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -11,10 +11,10 @@ import ( const ( // Default size of metrics batch size. - DEFAULT_METRIC_BATCH_SIZE = 1000 + DefaultMetricBatchSize = 1000 // Default number of metrics kept. It should be a multiple of batch size. - DEFAULT_METRIC_BUFFER_LIMIT = 10000 + DefaultMetricBufferLimit = 10000 ) // OutputConfig containing name and filter @@ -56,7 +56,6 @@ type RunningOutput struct { } func NewRunningOutput( - name string, output telegraf.Output, config *OutputConfig, batchSize int, @@ -78,13 +77,13 @@ func NewRunningOutput( bufferLimit = config.MetricBufferLimit } if bufferLimit == 0 { - bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT + bufferLimit = DefaultMetricBufferLimit } if config.MetricBatchSize > 0 { batchSize = config.MetricBatchSize } if batchSize == 0 { - batchSize = DEFAULT_METRIC_BATCH_SIZE + batchSize = DefaultMetricBatchSize } ro := &RunningOutput{ @@ -114,8 +113,8 @@ func (r *RunningOutput) LogName() string { return logName("outputs", r.Config.Name, r.Config.Alias) } -func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { - ro.MetricsFiltered.Incr(1) +func (r *RunningOutput) metricFiltered(metric telegraf.Metric) { + r.MetricsFiltered.Incr(1) metric.Drop() } @@ -125,7 +124,6 @@ func (r *RunningOutput) Init() error { if err != nil { return err } - } return nil } @@ -133,45 +131,45 @@ func (r *RunningOutput) Init() error { // AddMetric adds a metric to the output. 
// // Takes ownership of metric -func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { - if ok := ro.Config.Filter.Select(metric); !ok { - ro.metricFiltered(metric) +func (r *RunningOutput) AddMetric(metric telegraf.Metric) { + if ok := r.Config.Filter.Select(metric); !ok { + r.metricFiltered(metric) return } - ro.Config.Filter.Modify(metric) + r.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { - ro.metricFiltered(metric) + r.metricFiltered(metric) return } - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.aggMutex.Lock() + if output, ok := r.Output.(telegraf.AggregatingOutput); ok { + r.aggMutex.Lock() output.Add(metric) - ro.aggMutex.Unlock() + r.aggMutex.Unlock() return } - if len(ro.Config.NameOverride) > 0 { - metric.SetName(ro.Config.NameOverride) + if len(r.Config.NameOverride) > 0 { + metric.SetName(r.Config.NameOverride) } - if len(ro.Config.NamePrefix) > 0 { - metric.AddPrefix(ro.Config.NamePrefix) + if len(r.Config.NamePrefix) > 0 { + metric.AddPrefix(r.Config.NamePrefix) } - if len(ro.Config.NameSuffix) > 0 { - metric.AddSuffix(ro.Config.NameSuffix) + if len(r.Config.NameSuffix) > 0 { + metric.AddSuffix(r.Config.NameSuffix) } - dropped := ro.buffer.Add(metric) - atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) + dropped := r.buffer.Add(metric) + atomic.AddInt64(&r.droppedMetrics, int64(dropped)) - count := atomic.AddInt64(&ro.newMetricsCount, 1) - if count == int64(ro.MetricBatchSize) { - atomic.StoreInt64(&ro.newMetricsCount, 0) + count := atomic.AddInt64(&r.newMetricsCount, 1) + if count == int64(r.MetricBatchSize) { + atomic.StoreInt64(&r.newMetricsCount, 0) select { - case ro.BatchReady <- time.Now(): + case r.BatchReady <- time.Now(): default: } } @@ -179,50 +177,50 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { // Write writes all metrics to the output, stopping when all have been sent on // or error. -func (ro *RunningOutput) Write() error { - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.aggMutex.Lock() +func (r *RunningOutput) Write() error { + if output, ok := r.Output.(telegraf.AggregatingOutput); ok { + r.aggMutex.Lock() metrics := output.Push() - ro.buffer.Add(metrics...) + r.buffer.Add(metrics...) output.Reset() - ro.aggMutex.Unlock() + r.aggMutex.Unlock() } - atomic.StoreInt64(&ro.newMetricsCount, 0) + atomic.StoreInt64(&r.newMetricsCount, 0) // Only process the metrics in the buffer now. Metrics added while we are // writing will be sent on the next call. - nBuffer := ro.buffer.Len() - nBatches := nBuffer/ro.MetricBatchSize + 1 + nBuffer := r.buffer.Len() + nBatches := nBuffer/r.MetricBatchSize + 1 for i := 0; i < nBatches; i++ { - batch := ro.buffer.Batch(ro.MetricBatchSize) + batch := r.buffer.Batch(r.MetricBatchSize) if len(batch) == 0 { break } - err := ro.write(batch) + err := r.write(batch) if err != nil { - ro.buffer.Reject(batch) + r.buffer.Reject(batch) return err } - ro.buffer.Accept(batch) + r.buffer.Accept(batch) } return nil } // WriteBatch writes a single batch of metrics to the output. 
-func (ro *RunningOutput) WriteBatch() error { - batch := ro.buffer.Batch(ro.MetricBatchSize) +func (r *RunningOutput) WriteBatch() error { + batch := r.buffer.Batch(r.MetricBatchSize) if len(batch) == 0 { return nil } - err := ro.write(batch) + err := r.write(batch) if err != nil { - ro.buffer.Reject(batch) + r.buffer.Reject(batch) return err } - ro.buffer.Accept(batch) + r.buffer.Accept(batch) return nil } diff --git a/models/running_output_test.go b/models/running_output_test.go index 38f79f9db397d..8e8d9a995fdf8 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -29,14 +29,6 @@ var next5 = []telegraf.Metric{ testutil.TestMetric(101, "metric10"), } -func reverse(metrics []telegraf.Metric) []telegraf.Metric { - result := make([]telegraf.Metric, 0, len(metrics)) - for i := len(metrics) - 1; i >= 0; i-- { - result = append(result, metrics[i]) - } - return result -} - // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ @@ -44,7 +36,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) { } m := &perfOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -59,7 +51,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { } m := &perfOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -77,7 +69,7 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { m := &perfOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -94,7 +86,7 @@ func TestRunningOutput_DropFilter(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -119,7 +111,7 @@ func TestRunningOutput_PassFilter(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -144,7 +136,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -165,7 +157,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -186,7 +178,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -207,7 +199,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 
1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -225,7 +217,7 @@ func TestRunningOutput_NameOverride(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -243,7 +235,7 @@ func TestRunningOutput_NamePrefix(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -261,7 +253,7 @@ func TestRunningOutput_NameSuffix(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -279,7 +271,7 @@ func TestRunningOutputDefault(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -301,7 +293,7 @@ func TestRunningOutputWriteFail(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 4, 12) + ro := NewRunningOutput(m, conf, 4, 12) // Fill buffer to limit twice for _, metric := range first5 { @@ -334,7 +326,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 100, 1000) + ro := NewRunningOutput(m, conf, 100, 1000) // add 5 metrics for _, metric := range first5 { @@ -372,7 +364,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 5, 100) + ro := NewRunningOutput(m, conf, 5, 100) // add 5 metrics for _, metric := range first5 { @@ -436,7 +428,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 5, 1000) + ro := NewRunningOutput(m, conf, 5, 1000) // add 5 metrics for _, metric := range first5 { @@ -470,7 +462,6 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { func TestInternalMetrics(t *testing.T) { _ = NewRunningOutput( - "test_internal", &mockOutput{}, &OutputConfig{ Filter: Filter{}, @@ -541,16 +532,14 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error { m.Lock() defer m.Unlock() if m.failWrite { - return fmt.Errorf("Failed Write!") + return fmt.Errorf("failed write") } if m.metrics == nil { m.metrics = []telegraf.Metric{} } - for _, metric := range metrics { - m.metrics = append(m.metrics, metric) - } + m.metrics = append(m.metrics, metrics...) 
 	return nil
 }
 
@@ -581,9 +570,9 @@ func (m *perfOutput) SampleConfig() string {
 	return ""
 }
 
-func (m *perfOutput) Write(metrics []telegraf.Metric) error {
+func (m *perfOutput) Write(_ []telegraf.Metric) error {
 	if m.failWrite {
-		return fmt.Errorf("Failed Write!")
+		return fmt.Errorf("failed write")
 	}
 	return nil
 }
diff --git a/models/running_processor.go b/models/running_processor.go
index 1bd2d0f6ed0c7..5201fb27f19c0 100644
--- a/models/running_processor.go
+++ b/models/running_processor.go
@@ -52,8 +52,8 @@ func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) {
 	metric.Drop()
 }
 
-func (r *RunningProcessor) Init() error {
-	if p, ok := r.Processor.(telegraf.Initializer); ok {
+func (rp *RunningProcessor) Init() error {
+	if p, ok := rp.Processor.(telegraf.Initializer); ok {
 		err := p.Init()
 		if err != nil {
 			return err
@@ -62,39 +62,39 @@
-func (r *RunningProcessor) Log() telegraf.Logger {
-	return r.log
+func (rp *RunningProcessor) Log() telegraf.Logger {
+	return rp.log
 }
 
-func (r *RunningProcessor) LogName() string {
-	return logName("processors", r.Config.Name, r.Config.Alias)
+func (rp *RunningProcessor) LogName() string {
+	return logName("processors", rp.Config.Name, rp.Config.Alias)
 }
 
-func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
+func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
 	return metric
 }
 
-func (r *RunningProcessor) Start(acc telegraf.Accumulator) error {
-	return r.Processor.Start(acc)
+func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error {
+	return rp.Processor.Start(acc)
 }
 
-func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
-	if ok := r.Config.Filter.Select(m); !ok {
+func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
+	if ok := rp.Config.Filter.Select(m); !ok {
 		// pass downstream
 		acc.AddMetric(m)
 		return nil
 	}
 
-	r.Config.Filter.Modify(m)
+	rp.Config.Filter.Modify(m)
 	if len(m.FieldList()) == 0 {
 		// drop metric
-		r.metricFiltered(m)
+		rp.metricFiltered(m)
 		return nil
 	}
 
-	return r.Processor.Add(m, acc)
+	return rp.Processor.Add(m, acc)
 }
 
-func (r *RunningProcessor) Stop() {
-	r.Processor.Stop()
+func (rp *RunningProcessor) Stop() {
+	rp.Processor.Stop()
 }
diff --git a/plugin.go b/plugin.go
index 0793fbb061115..f9dcaeac0344c 100644
--- a/plugin.go
+++ b/plugin.go
@@ -1,5 +1,7 @@
 package telegraf
 
+var Debug bool
+
 // Initializer is an interface that all plugin types: Inputs, Outputs,
 // Processors, and Aggregators can optionally implement to initialize the
 // plugin.
@@ -21,7 +23,7 @@ type PluginDescriber interface {
 	Description() string
 }
 
-// Logger defines an interface for logging.
+// Logger defines a plugin-related interface for logging.
 type Logger interface {
 	// Errorf logs an error message, patterned after log.Printf.
Errorf(format string, args ...interface{}) diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index eabfaa4bf8460..20d5b5ea2e482 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -1,10 +1,13 @@ package all import ( + //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats" + _ "github.com/influxdata/telegraf/plugins/aggregators/derivative" _ "github.com/influxdata/telegraf/plugins/aggregators/final" _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" _ "github.com/influxdata/telegraf/plugins/aggregators/merge" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" + _ "github.com/influxdata/telegraf/plugins/aggregators/quantile" _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter" ) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index 8fef0c6f4886a..f13dd8f375682 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -16,7 +16,7 @@ emitting the aggregate every `period` seconds. drop_original = false ## Configures which basic stats to push as fields - # stats = ["count","diff","min","max","mean","non_negative_diff","stdev","s2","sum"] + # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"] ``` - stats @@ -28,13 +28,16 @@ emitting the aggregate every `period` seconds. - measurement1 - field1_count - field1_diff (difference) + - field1_rate (rate per second) - field1_max - field1_min - field1_mean - field1_non_negative_diff (non-negative difference) + - field1_non_negative_rate (non-negative rate per second) - field1_sum - field1_s2 (variance) - field1_stdev (standard deviation) + - field1_interval (interval in nanoseconds) ### Tags: @@ -46,8 +49,8 @@ No tags are applied by this aggregator. 
$ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 -system,host=tars load1_count=2,load1_diff=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 +system,host=tars load1_count=2,load1_diff=0,load1_rate=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0,load1_interval=10000000000i 1475584010000000000 system,host=tars load1=1 1475584020000000000 system,host=tars load1=3 1475584030000000000 -system,host=tars load1_count=2,load1_diff=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 +system,host=tars load1_count=2,load1_diff=2,load1_rate=0.2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162,load1_interval=10000000000i 1475584010000000000 ``` diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 4e62ee31123a4..4ad6c77056314 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -2,6 +2,7 @@ package basicstats import ( "math" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" @@ -16,15 +17,18 @@ type BasicStats struct { } type configuredStats struct { - count bool - min bool - max bool - mean bool - variance bool - stdev bool - sum bool - diff bool - non_negative_diff bool + count bool + min bool + max bool + mean bool + variance bool + stdev bool + sum bool + diff bool + nonNegativeDiff bool + rate bool + nonNegativeRate bool + interval bool } func NewBasicStats() *BasicStats { @@ -40,14 +44,17 @@ type aggregate struct { } type basicstats struct { - count float64 - min float64 - max float64 - sum float64 - mean float64 - diff float64 - M2 float64 //intermediate value for variance/stdev - LAST float64 //intermediate value for diff + count float64 + min float64 + max float64 + sum float64 + mean float64 + diff float64 + rate float64 + interval time.Duration + M2 float64 //intermediate value for variance/stdev + LAST float64 //intermediate value for diff + TIME time.Time //intermediate value for rate } var sampleConfig = ` @@ -88,8 +95,10 @@ func (b *BasicStats) Add(in telegraf.Metric) { mean: fv, sum: fv, diff: 0.0, + rate: 0.0, M2: 0.0, LAST: fv, + TIME: in.Time(), } } } @@ -100,14 +109,17 @@ func (b *BasicStats) Add(in telegraf.Metric) { if _, ok := b.cache[id].fields[field.Key]; !ok { // hit an uncached field of a cached metric b.cache[id].fields[field.Key] = basicstats{ - count: 1, - min: fv, - max: fv, - mean: fv, - sum: fv, - diff: 0.0, - M2: 0.0, - LAST: fv, + count: 1, + min: fv, + max: fv, + mean: fv, + sum: fv, + diff: 0.0, + rate: 0.0, + interval: 0, + M2: 0.0, + LAST: fv, + TIME: in.Time(), } continue } @@ -138,6 +150,12 @@ func (b *BasicStats) Add(in telegraf.Metric) { tmp.sum += fv //diff compute tmp.diff = fv - tmp.LAST + //interval compute + tmp.interval = in.Time().Sub(tmp.TIME) + //rate compute + if !in.Time().Equal(tmp.TIME) { + tmp.rate = tmp.diff / tmp.interval.Seconds() + } //store final data b.cache[id].fields[field.Key] = tmp } @@ -149,7 +167,6 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { for _, aggregate := range b.cache { fields := map[string]interface{}{} for k, v := range aggregate.fields { - if b.statsConfig.count { fields[k+"_count"] = v.count } @@ -179,10 +196,18 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { if b.statsConfig.diff { fields[k+"_diff"] = v.diff 
} - if b.statsConfig.non_negative_diff && v.diff >= 0 { + if b.statsConfig.nonNegativeDiff && v.diff >= 0 { fields[k+"_non_negative_diff"] = v.diff } - + if b.statsConfig.rate { + fields[k+"_rate"] = v.rate + } + if b.statsConfig.nonNegativeRate && v.diff >= 0 { + fields[k+"_non_negative_rate"] = v.rate + } + if b.statsConfig.interval { + fields[k+"_interval"] = v.interval.Nanoseconds() + } } //if count == 1 StdDev = infinite => so I won't send data } @@ -216,8 +241,13 @@ func (b *BasicStats) parseStats() *configuredStats { case "diff": parsed.diff = true case "non_negative_diff": - parsed.non_negative_diff = true - + parsed.nonNegativeDiff = true + case "rate": + parsed.rate = true + case "non_negative_rate": + parsed.nonNegativeRate = true + case "interval": + parsed.interval = true default: b.Log.Warnf("Unrecognized basic stat %q, ignoring", name) } @@ -229,14 +259,16 @@ func (b *BasicStats) parseStats() *configuredStats { func (b *BasicStats) getConfiguredStats() { if b.Stats == nil { b.statsConfig = &configuredStats{ - count: true, - min: true, - max: true, - mean: true, - variance: true, - stdev: true, - sum: false, - non_negative_diff: false, + count: true, + min: true, + max: true, + mean: true, + variance: true, + stdev: true, + sum: false, + nonNegativeDiff: false, + rate: false, + nonNegativeRate: false, } } else { b.statsConfig = b.parseStats() diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index c5a093840abc7..51ecd5c992442 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" ) -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -19,9 +19,9 @@ var m1, _ = metric.New("m1", "d": float64(2), "g": int64(3), }, - time.Now(), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -34,7 +34,7 @@ var m2, _ = metric.New("m1", "andme": true, "g": int64(1), }, - time.Now(), + time.Date(2000, 1, 1, 0, 0, 0, 1e6, time.UTC), ) func BenchmarkApply(b *testing.B) { @@ -184,7 +184,6 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { // Test only aggregating count func TestBasicStatsWithOnlyCount(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"count"} aggregator.Log = testutil.Logger{} @@ -213,7 +212,6 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { // Test only aggregating minimum func TestBasicStatsWithOnlyMin(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"min"} aggregator.Log = testutil.Logger{} @@ -242,7 +240,6 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { // Test only aggregating maximum func TestBasicStatsWithOnlyMax(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"max"} aggregator.Log = testutil.Logger{} @@ -271,7 +268,6 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { // Test only aggregating mean func TestBasicStatsWithOnlyMean(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"mean"} aggregator.Log = testutil.Logger{} @@ -300,7 +296,6 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { // Test only aggregating sum func TestBasicStatsWithOnlySum(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"sum"} aggregator.Log = testutil.Logger{} @@ 
-331,29 +326,28 @@ func TestBasicStatsWithOnlySum(t *testing.T) { // implementations of sum were calculated from mean and count, which // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8. func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { - - var sum1, _ = metric.New("m1", + var sum1 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), }, time.Now(), ) - var sum2, _ = metric.New("m1", + var sum2 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), }, time.Now(), ) - var sum3, _ = metric.New("m1", + var sum3 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(5), }, time.Now(), ) - var sum4, _ = metric.New("m1", + var sum4 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), @@ -383,7 +377,6 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { // Test only aggregating variance func TestBasicStatsWithOnlyVariance(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"s2"} aggregator.Log = testutil.Logger{} @@ -410,7 +403,6 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { // Test only aggregating standard deviation func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"stdev"} aggregator.Log = testutil.Logger{} @@ -437,7 +429,6 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { // Test only aggregating minimum and maximum func TestBasicStatsWithMinAndMax(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"min", "max"} aggregator.Log = testutil.Logger{} @@ -473,7 +464,6 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { // Test only aggregating diff func TestBasicStatsWithDiff(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"diff"} aggregator.Log = testutil.Logger{} @@ -498,9 +488,80 @@ func TestBasicStatsWithDiff(t *testing.T) { acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) } +func TestBasicStatsWithRate(t *testing.T) { + aggregator := NewBasicStats() + aggregator.Stats = []string{"rate"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + expectedFields := map[string]interface{}{ + "a_rate": float64(0), + "b_rate": float64(2000), + "c_rate": float64(2000), + "d_rate": float64(4000), + "g_rate": float64(-2000), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +func TestBasicStatsWithNonNegativeRate(t *testing.T) { + aggregator := NewBasicStats() + aggregator.Stats = []string{"non_negative_rate"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_non_negative_rate": float64(0), + "b_non_negative_rate": float64(2000), + "c_non_negative_rate": float64(2000), + "d_non_negative_rate": float64(4000), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} +func TestBasicStatsWithInterval(t *testing.T) { + aggregator := NewBasicStats() + aggregator.Stats = []string{"interval"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + 
aggregator.Push(&acc)
+
+	expectedFields := map[string]interface{}{
+		"a_interval": int64(time.Millisecond),
+		"b_interval": int64(time.Millisecond),
+		"c_interval": int64(time.Millisecond),
+		"d_interval": int64(time.Millisecond),
+		"g_interval": int64(time.Millisecond),
+	}
+	expectedTags := map[string]string{
+		"foo": "bar",
+	}
+	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
+}
+
 // Test only aggregating non_negative_diff
 func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
-
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"non_negative_diff"}
 	aggregator.Log = testutil.Logger{}
@@ -591,7 +652,6 @@ func TestBasicStatsWithAllStats(t *testing.T) {
 
 // Test that if an empty array is passed, no points are pushed
 func TestBasicStatsWithNoStats(t *testing.T) {
-
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{}
 	aggregator.Log = testutil.Logger{}
@@ -608,7 +668,6 @@
 
 // Test that if an unknown stat is configured, it doesn't explode
 func TestBasicStatsWithUnknownStat(t *testing.T) {
-
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"crazy"}
 	aggregator.Log = testutil.Logger{}
@@ -628,7 +687,6 @@
 // otherwise user's working systems will suddenly (and surprisingly) start
 // capturing sum without their input.
 func TestBasicStatsWithDefaultStats(t *testing.T) {
-
 	aggregator := NewBasicStats()
 	aggregator.Log = testutil.Logger{}
 	aggregator.getConfiguredStats()
diff --git a/plugins/aggregators/derivative/README.md b/plugins/aggregators/derivative/README.md
new file mode 100644
index 0000000000000..3ca29c36d4f49
--- /dev/null
+++ b/plugins/aggregators/derivative/README.md
@@ -0,0 +1,166 @@
+# Derivative Aggregator Plugin
+The Derivative Aggregator Plugin estimates the derivative for all fields of the
+aggregated metrics.
+
+### Time Derivatives
+
+In its default configuration it determines the first and last measurement of
+the period. From these measurements the time difference in seconds is
+calculated. This time difference is then used to divide the difference of each
+field using the following formula:
+```
+             field_last - field_first
+derivative = --------------------------
+                 time_difference
+```
+For each field the derivative is emitted with the naming pattern
+`<fieldname>_rate`.
+
+### Custom Derivation Variable
+
+The plugin supports using a field of the aggregated measurements as the derivation
+variable in the denominator. This variable is assumed to be a monotonically
+increasing value. In this case the following formula is used:
+```
+             field_last - field_first
+derivative = --------------------------------
+             variable_last - variable_first
+```
+**Make sure the specified variable is not filtered and exists in the metrics passed to this aggregator!**
+
+When using a custom derivation variable, you should change the `suffix` of the derivative name.
+See the next section on [customizing the derivative name](#customize-the-derivative-name) for details.
+
+### Customize the Derivative Name
+
+The derivatives generated by the aggregator are named `<fieldname>_rate`, i.e. they are composed of the field name and a suffix `_rate`.
+You can configure the suffix to be used by changing the `suffix` parameter.
+
+### Roll-Over to next Period
+
+Calculating the derivative for a period requires at least two distinct measurements during that period.
+Whether those are available depends on the configuration of the aggregator `period` and the agent `interval`.
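+For instance, with the purely illustrative settings below (example values, not recommendations), each 10 s aggregation period receives about five measurements, whereas `interval = "10s"` would leave at most one measurement per period and thus nothing to derive from:
+
+```toml
+[agent]
+  ## hypothetical example value: inputs are gathered every 2 seconds
+  interval = "2s"
+
+[[aggregators.derivative]]
+  ## the derivative is then calculated from the samples within each 10 s window
+  period = "10s"
+```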
+By default the last measurement is used as the first measurement of the next
+aggregation period. This enables a continuous calculation of the derivative. If
+within the next period an earlier timestamp is encountered, this measurement will
+replace the roll-over metric. A main benefit of this roll-over is the ability to
+cope with multiple "quiet" periods, where no new measurement is pushed to the
+aggregator. The roll-over will take place at most `max_roll_over` times.
+
+#### Example of Roll-Over
+
+Let us assume we have an input plugin that generates a measurement with a single metric "test" every 2 seconds.
+Let this metric increase the first 10 seconds from 0.0 to 10.0 and then decrease the next 10 seconds from 10.0 to 0.0:
+
+| timestamp | value |
+|-----------|-------|
+| 0 | 0.0 |
+| 2 | 2.0 |
+| 4 | 4.0 |
+| 6 | 6.0 |
+| 8 | 8.0 |
+| 10 | 10.0 |
+| 12 | 8.0 |
+| 14 | 6.0 |
+| 16 | 4.0 |
+| 18 | 2.0 |
+| 20 | 0.0 |
+
+To avoid thinking about border values, we consider periods to be inclusive at the start but exclusive at the end.
+Using `period = "10s"` and `max_roll_over = 0` we would get the following aggregates:
+
+| timestamp | value | aggregate | explanation |
+|-----------|-------|-----------|--------------|
+| 0 | 0.0 |
+| 2 | 2.0 |
+| 4 | 4.0 |
+| 6 | 6.0 |
+| 8 | 8.0 |
+||| 1.0 | (8.0 - 0.0) / (8 - 0) |
+| 10 | 10.0 |
+| 12 | 8.0 |
+| 14 | 6.0 |
+| 16 | 4.0 |
+| 18 | 2.0 |
+||| -1.0 | (2.0 - 10.0) / (18 - 10) |
+| 20 | 0.0 |
+
+If we now decrease the period with `period = "2s"`, no derivative could be calculated, since there would be only one measurement for each period.
+The aggregator will emit the log message `Same first and last event for "test", skipping.`.
+This changes if we use `max_roll_over = 1`, since the last measurement of a period is now taken as the first measurement of the next period.
+
+| timestamp | value | aggregate | explanation |
+|-----------|-------|-----------|--------------|
+| 0 | 0.0 |
+| 2 | 2.0 | 1.0 | (2.0 - 0.0) / (2 - 0) |
+| 4 | 4.0 | 1.0 | (4.0 - 2.0) / (4 - 2) |
+| 6 | 6.0 | 1.0 | (6.0 - 4.0) / (6 - 4) |
+| 8 | 8.0 | 1.0 | (8.0 - 6.0) / (8 - 6) |
+| 10 | 10.0 | 1.0 | (10.0 - 8.0) / (10 - 8) |
+| 12 | 8.0 | -1.0 | (8.0 - 10.0) / (12 - 10) |
+| 14 | 6.0 | -1.0 | (6.0 - 8.0) / (14 - 12) |
+| 16 | 4.0 | -1.0 | (4.0 - 6.0) / (16 - 14) |
+| 18 | 2.0 | -1.0 | (2.0 - 4.0) / (18 - 16) |
+| 20 | 0.0 | -1.0 | (0.0 - 2.0) / (20 - 18) |
+
+The default `max_roll_over = 10` allows for multiple periods without measurements, either due to configuration or missing input.
+
+There may be a slight difference in the calculation when using `max_roll_over` compared to running without it.
+To illustrate this, let us compare the derivatives for `period = "7s"`.
+
+| timestamp | value | `max_roll_over = 0` | `max_roll_over = 1` |
+|-----------|-------|-----------|--------------|
+| 0 | 0.0 |
+| 2 | 2.0 |
+| 4 | 4.0 |
+| 6 | 6.0 |
+||| 1.0 | 1.0 |
+| 8 | 8.0 |
+| 10 | 10.0 |
+| 12 | 8.0 |
+||| 0.0 | 0.33... |
+| 14 | 6.0 |
+| 16 | 4.0 |
+| 18 | 2.0 |
+| 20 | 0.0 |
+||| -1.0 | -1.0 |
+
+The difference stems from the change of the value between periods, e.g. from 6.0 to 8.0 between the first and second period.
+Those changes are omitted with `max_roll_over = 0` but are respected with `max_roll_over = 1`.
+That there are no further differences in the calculated derivatives is due to the example data, which has constant derivatives within the first and last periods, even when including the gap between the periods.
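+Spelled out for the second period above (measurements at timestamps 8, 10 and 12), the two settings evaluate to:
+
+```
+max_roll_over = 0:  (8.0 - 8.0) / (12 - 8) = 0.0
+max_roll_over = 1:  (8.0 - 6.0) / (12 - 6) = 0.33...   (6.0 rolled over from timestamp 6)
+```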
+Using `max_roll_over` with a value greater than 0 may be important if you need to detect changes between periods, e.g. when you have very few measurements in a period or quasi-constant metrics with only occasional changes.
+
+### Configuration
+
+```toml
+[[aggregators.derivative]]
+  ## Specific Derivative Aggregator Arguments:
+
+  ## Configure a custom derivation variable. Timestamp is used if none is given.
+  # variable = ""
+
+  ## Suffix to add to the field name for the derivative name.
+  # suffix = "_rate"
+
+  ## Roll-Over last measurement to first measurement of next period
+  # max_roll_over = 10
+
+  ## General Aggregator Arguments:
+
+  ## calculate derivative every 30 seconds
+  period = "30s"
+```
+
+### Tags:
+No tags are applied by this aggregator.
+Existing tags are passed through the aggregator untouched.
+
+### Example Output
+
+```
+net bytes_recv=15409i,packets_recv=164i,bytes_sent=16649i,packets_sent=120i 1508843640000000000
+net bytes_recv=73987i,packets_recv=364i,bytes_sent=87328i,packets_sent=452i 1508843660000000000
+net bytes_recv_by_packets_recv=292.89 1508843660000000000
+net packets_sent_rate=16.6,bytes_sent_rate=3533.95 1508843660000000000
+net bytes_sent_by_packet=292.89 1508843660000000000
+```
diff --git a/plugins/aggregators/derivative/derivative.go b/plugins/aggregators/derivative/derivative.go
new file mode 100644
index 0000000000000..f9e9c33de96c9
--- /dev/null
+++ b/plugins/aggregators/derivative/derivative.go
@@ -0,0 +1,224 @@
+package derivative
+
+import (
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/aggregators"
+)
+
+type Derivative struct {
+	Variable    string          `toml:"variable"`
+	Suffix      string          `toml:"suffix"`
+	MaxRollOver uint            `toml:"max_roll_over"`
+	Log         telegraf.Logger `toml:"-"`
+	cache       map[uint64]*aggregate
+}
+
+type aggregate struct {
+	first    *event
+	last     *event
+	name     string
+	tags     map[string]string
+	rollOver uint
+}
+
+type event struct {
+	fields map[string]float64
+	time   time.Time
+}
+
+const defaultSuffix = "_rate"
+
+func NewDerivative() *Derivative {
+	derivative := &Derivative{Suffix: defaultSuffix, MaxRollOver: 10}
+	derivative.cache = make(map[uint64]*aggregate)
+	derivative.Reset()
+	return derivative
+}
+
+var sampleConfig = `
+  ## The period in which to flush the aggregator.
+  period = "30s"
+  ##
+  ## If true, the original metric will be dropped by the
+  ## aggregator and will not get sent to the output plugins.
+  drop_original = false
+  ##
+  ## This aggregator will estimate a derivative for each field, which is
+  ## contained in both the first and last metric of the aggregation interval.
+  ## Without further configuration the derivative will be calculated with
+  ## respect to the time difference between these two measurements in seconds.
+  ## The formula applied is for every field:
+  ##
+  ##               value_last - value_first
+  ## derivative = --------------------------
+  ##              time_difference_in_seconds
+  ##
+  ## The resulting derivative will be named *fieldname_rate*. The suffix
+  ## "_rate" can be configured by the *suffix* parameter. When using a
+  ## derivation variable you can include its name for more clarity.
+  # suffix = "_rate"
+  ##
+  ## As an abstraction the derivative can be calculated not only by the time
+  ## difference but by the difference of a field, which is contained in the
+  ## measurement. This field is assumed to be monotonically increasing. This
+  ## feature is used by specifying a *variable*.
+  ## Make sure the specified variable is not filtered and exists in the metrics
+  ## passed to this aggregator!
+  # variable = ""
+  ##
+  ## When using a field as the derivation parameter the name of that field will
+  ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
+  ##
+  ## Note that the calculation is based on the actual timestamp of the
+  ## measurements. When there is only one measurement during that period, the
+  ## measurement will be rolled over to the next period. The maximum number of
+  ## such roll-overs can be configured with a default of 10.
+  # max_roll_over = 10
+  ##
+`
+
+func (d *Derivative) SampleConfig() string {
+	return sampleConfig
+}
+
+func (d *Derivative) Description() string {
+	return "Calculates a derivative for every field."
+}
+
+func (d *Derivative) Add(in telegraf.Metric) {
+	id := in.HashID()
+	current, ok := d.cache[id]
+	if !ok {
+		// hit an uncached metric, create caches for first time:
+		d.cache[id] = newAggregate(in)
+		return
+	}
+	if current.first.time.After(in.Time()) {
+		current.first = newEvent(in)
+		current.rollOver = 0
+	} else if current.first.time.Equal(in.Time()) {
+		upsertConvertedFields(in.Fields(), current.first.fields)
+		current.rollOver = 0
+	}
+	if current.last.time.Before(in.Time()) {
+		current.last = newEvent(in)
+		current.rollOver = 0
+	} else if current.last.time.Equal(in.Time()) {
+		upsertConvertedFields(in.Fields(), current.last.fields)
+		current.rollOver = 0
+	}
+}
+
+func newAggregate(in telegraf.Metric) *aggregate {
+	event := newEvent(in)
+	return &aggregate{
+		name:     in.Name(),
+		tags:     in.Tags(),
+		first:    event,
+		last:     event,
+		rollOver: 0,
+	}
+}
+
+func newEvent(in telegraf.Metric) *event {
+	return &event{
+		fields: extractConvertedFields(in),
+		time:   in.Time(),
+	}
+}
+
+func extractConvertedFields(in telegraf.Metric) map[string]float64 {
+	fields := make(map[string]float64, len(in.Fields()))
+	upsertConvertedFields(in.Fields(), fields)
+	return fields
+}
+
+func upsertConvertedFields(source map[string]interface{}, target map[string]float64) {
+	for k, v := range source {
+		if value, ok := convert(v); ok {
+			target[k] = value
+		}
+	}
+}
+
+func convert(in interface{}) (float64, bool) {
+	switch v := in.(type) {
+	case float64:
+		return v, true
+	case int64:
+		return float64(v), true
+	case uint64:
+		return float64(v), true
+	}
+	return 0, false
+}
+
+func (d *Derivative) Push(acc telegraf.Accumulator) {
+	for _, aggregate := range d.cache {
+		if aggregate.first == aggregate.last {
+			d.Log.Debugf("Same first and last event for %q, skipping.", aggregate.name)
+			continue
+		}
+		var denominator float64
+		denominator = aggregate.last.time.Sub(aggregate.first.time).Seconds()
+		if len(d.Variable) > 0 {
+			var first float64
+			var last float64
+			var found bool
+			if first, found = aggregate.first.fields[d.Variable]; !found {
+				d.Log.Debugf("Did not find %q in first event for %q.", d.Variable, aggregate.name)
+				continue
+			}
+			if last, found = aggregate.last.fields[d.Variable]; !found {
+				d.Log.Debugf("Did not find %q in last event for %q.", d.Variable, aggregate.name)
+				continue
+			}
+			denominator = last - first
+		}
+		if denominator == 0 {
+			d.Log.Debugf("Got difference 0 in denominator for %q, skipping.", aggregate.name)
+			continue
+		}
+		derivatives := make(map[string]interface{})
+		for key, start := range aggregate.first.fields {
+			if key == d.Variable {
+				// Skip derivation variable
+				continue
+			}
+			if end, ok := aggregate.last.fields[key]; ok {
+				d.Log.Debugf("Adding derivative %q to %q.", key+d.Suffix,
aggregate.name) + derivatives[key+d.Suffix] = (end - start) / denominator + } + } + acc.AddFields(aggregate.name, derivatives, aggregate.tags) + } +} + +func (d *Derivative) Reset() { + for id, aggregate := range d.cache { + if aggregate.rollOver < d.MaxRollOver { + aggregate.first = aggregate.last + aggregate.rollOver = aggregate.rollOver + 1 + d.cache[id] = aggregate + d.Log.Debugf("Roll-Over %q for the %d time.", aggregate.name, aggregate.rollOver) + } else { + delete(d.cache, id) + d.Log.Debugf("Removed %q from cache.", aggregate.name) + } + } +} + +func (d *Derivative) Init() error { + d.Suffix = strings.TrimSpace(d.Suffix) + d.Variable = strings.TrimSpace(d.Variable) + return nil +} + +func init() { + aggregators.Add("derivative", func() telegraf.Aggregator { + return NewDerivative() + }) +} diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go new file mode 100644 index 0000000000000..fb84dae6ff54a --- /dev/null +++ b/plugins/aggregators/derivative/derivative_test.go @@ -0,0 +1,403 @@ +package derivative + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +var start = metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "increasing": int64(0), + "decreasing": int64(100), + "unchanged": int64(42), + "ignored": "strings are not supported", + "parameter": float64(0.0), + }, + time.Now(), +) + +var finish = metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "increasing": int64(1000), + "decreasing": int64(0), + "unchanged": int64(42), + "ignored": "strings are not supported", + "parameter": float64(10.0), + }, + time.Now().Add(time.Second), +) + +func TestTwoFullEventsWithParameter(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsWithParameterReverseSequence(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(finish) + derivative.Add(start) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsWithoutParameter(t *testing.T) { + acc := testutil.Accumulator{} + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + derivative.Init() + + startTime := time.Now() + duration, _ := time.ParseDuration("2s") + endTime := startTime.Add(duration) + + first := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(10), + }, + startTime, + ) + last := metric.New("One Field", + 
map[string]string{},
+		map[string]interface{}{
+			"value": int64(20),
+		},
+		endTime,
+	)
+
+	derivative.Add(first)
+	derivative.Add(last)
+	derivative.Push(&acc)
+
+	acc.AssertContainsFields(t,
+		"One Field",
+		map[string]interface{}{
+			"value_rate": float64(5),
+		},
+	)
+}
+
+func TestTwoFullEventsInSeperatePushes(t *testing.T) {
+	acc := testutil.Accumulator{}
+	derivative := &Derivative{
+		Variable:    " parameter",
+		Suffix:      "_wrt_parameter",
+		MaxRollOver: 10,
+		cache:       make(map[uint64]*aggregate),
+	}
+	derivative.Log = testutil.Logger{}
+	derivative.Init()
+
+	derivative.Add(start)
+	derivative.Push(&acc)
+
+	acc.AssertDoesNotContainMeasurement(t, "TestMetric")
+
+	acc.ClearMetrics()
+
+	derivative.Add(finish)
+	derivative.Push(&acc)
+
+	expectedFields := map[string]interface{}{
+		"increasing_wrt_parameter": 100.0,
+		"decreasing_wrt_parameter": -10.0,
+		"unchanged_wrt_parameter":  0.0,
+	}
+	expectedTags := map[string]string{
+		"state": "full",
+	}
+
+	acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
+}
+
+func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) {
+	acc := testutil.Accumulator{}
+	derivative := &Derivative{
+		Variable:    "parameter",
+		Suffix:      "_wrt_parameter",
+		MaxRollOver: 10,
+		cache:       make(map[uint64]*aggregate),
+	}
+	derivative.Log = testutil.Logger{}
+	derivative.Init()
+
+	derivative.Add(start)
+	derivative.Push(&acc)
+
+	acc.AssertDoesNotContainMeasurement(t, "TestMetric")
+
+	derivative.Push(&acc)
+	derivative.Push(&acc)
+	derivative.Push(&acc)
+
+	derivative.Add(finish)
+	derivative.Push(&acc)
+
+	expectedFields := map[string]interface{}{
+		"increasing_wrt_parameter": 100.0,
+		"decreasing_wrt_parameter": -10.0,
+		"unchanged_wrt_parameter":  0.0,
+	}
+
+	acc.AssertContainsFields(t, "TestMetric", expectedFields)
+}
+
+func TestTwoFullEventsInSeperatePushesWithOutRollOver(t *testing.T) {
+	acc := testutil.Accumulator{}
+	derivative := &Derivative{
+		Variable:    "parameter",
+		Suffix:      "_by_parameter",
+		MaxRollOver: 0,
+		cache:       make(map[uint64]*aggregate),
+	}
+	derivative.Log = testutil.Logger{}
+	derivative.Init()
+
+	derivative.Add(start)
+	// This test relies on RunningAggregator always calling Reset after Push
+	// to remove the first metric after max-rollover of 0 has been reached.
+ derivative.Push(&acc) + derivative.Reset() + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + derivative.Add(finish) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") +} + +func TestIgnoresMissingVariable(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + noParameter := metric.New("TestMetric", + map[string]string{"state": "no_parameter"}, + map[string]interface{}{ + "increasing": int64(100), + "decreasing": int64(0), + "unchanged": int64(42), + }, + time.Now(), + ) + + derivative.Add(noParameter) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + derivative.Add(noParameter) + derivative.Add(start) + derivative.Add(noParameter) + derivative.Add(finish) + derivative.Add(noParameter) + derivative.Push(&acc) + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestMergesDifferenMetricsWithSameHash(t *testing.T) { + acc := testutil.Accumulator{} + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + derivative.Init() + + startTime := time.Now() + duration, _ := time.ParseDuration("2s") + endTime := startTime.Add(duration) + part1 := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{"field1": int64(10)}, + startTime, + ) + part2 := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{"field2": int64(20)}, + startTime, + ) + final := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "field1": int64(30), + "field2": int64(30), + }, + endTime, + ) + + derivative.Add(part1) + derivative.Push(&acc) + derivative.Add(part2) + derivative.Push(&acc) + derivative.Add(final) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "field1_rate": 10.0, + "field2_rate": 5.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestDropsAggregatesOnMaxRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + MaxRollOver: 1, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + derivative.Push(&acc) + derivative.Reset() + derivative.Push(&acc) + derivative.Reset() + derivative.Add(finish) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") +} + +func TestAddMetricsResetsRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + MaxRollOver: 1, + cache: make(map[uint64]*aggregate), + Log: testutil.Logger{}, + } + derivative.Init() + + derivative.Add(start) + derivative.Push(&acc) + derivative.Reset() + derivative.Add(start) + derivative.Reset() + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + + acc.AssertContainsFields(t, "TestMetric", 
expectedFields) +} + +func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { + acc := testutil.Accumulator{} + period, _ := time.ParseDuration("10s") + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + derivative.Init() + + startTime := time.Now() + first := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(10), + }, + startTime, + ) + derivative.Add(first) + derivative.Push(&acc) + derivative.Reset() + + second := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(20), + }, + startTime.Add(period), + ) + derivative.Add(second) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertContainsFields(t, "One Field", map[string]interface{}{ + "value_rate": 1.0, + }) + + acc.ClearMetrics() + third := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(40), + }, + startTime.Add(period).Add(period), + ) + derivative.Add(third) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertContainsFields(t, "One Field", map[string]interface{}{ + "value_rate": 2.0, + }) +} diff --git a/plugins/aggregators/final/final.go b/plugins/aggregators/final/final.go index 53ad0a47c9d95..3ef32a10ab39f 100644 --- a/plugins/aggregators/final/final.go +++ b/plugins/aggregators/final/final.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/aggregators" ) @@ -20,7 +20,7 @@ var sampleConfig = ` ` type Final struct { - SeriesTimeout internal.Duration `toml:"series_timeout"` + SeriesTimeout config.Duration `toml:"series_timeout"` // The last metric for all series which are active metricCache map[uint64]telegraf.Metric @@ -28,7 +28,7 @@ type Final struct { func NewFinal() *Final { return &Final{ - SeriesTimeout: internal.Duration{Duration: 5 * time.Minute}, + SeriesTimeout: config.Duration(5 * time.Minute), metricCache: make(map[uint64]telegraf.Metric), } } @@ -51,7 +51,7 @@ func (m *Final) Push(acc telegraf.Accumulator) { acc.SetPrecision(time.Nanosecond) for id, metric := range m.metricCache { - if time.Since(metric.Time()) > m.SeriesTimeout.Duration { + if time.Since(metric.Time()) > time.Duration(m.SeriesTimeout) { fields := map[string]interface{}{} for _, field := range metric.FieldList() { fields[field.Key+"_final"] = field.Value diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go index 1b3367fa5b3ad..6b0c6e8e38c24 100644 --- a/plugins/aggregators/final/final_test.go +++ b/plugins/aggregators/final/final_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -15,15 +15,15 @@ func TestSimple(t *testing.T) { final := NewFinal() tags := map[string]string{"foo": "bar"} - m1, _ := metric.New("m1", + m1 := metric.New("m1", tags, map[string]interface{}{"a": int64(1)}, time.Unix(1530939936, 0)) - m2, _ := metric.New("m1", + m2 := metric.New("m1", tags, map[string]interface{}{"a": int64(2)}, time.Unix(1530939937, 0)) - m3, _ := metric.New("m1", + m3 := metric.New("m1", tags, map[string]interface{}{"a": int64(3)}, time.Unix(1530939938, 0)) @@ -52,15 +52,15 @@ func TestTwoTags(t *testing.T) { tags1 := map[string]string{"foo": "bar"} tags2 := map[string]string{"foo": "baz"} - m1, _ := metric.New("m1", 
+ m1 := metric.New("m1", tags1, map[string]interface{}{"a": int64(1)}, time.Unix(1530939936, 0)) - m2, _ := metric.New("m1", + m2 := metric.New("m1", tags2, map[string]interface{}{"a": int64(2)}, time.Unix(1530939937, 0)) - m3, _ := metric.New("m1", + m3 := metric.New("m1", tags1, map[string]interface{}{"a": int64(3)}, time.Unix(1530939938, 0)) @@ -93,24 +93,24 @@ func TestTwoTags(t *testing.T) { func TestLongDifference(t *testing.T) { acc := testutil.Accumulator{} final := NewFinal() - final.SeriesTimeout = internal.Duration{Duration: 30 * time.Second} + final.SeriesTimeout = config.Duration(30 * time.Second) tags := map[string]string{"foo": "bar"} now := time.Now() - m1, _ := metric.New("m", + m1 := metric.New("m", tags, map[string]interface{}{"a": int64(1)}, now.Add(time.Second*-290)) - m2, _ := metric.New("m", + m2 := metric.New("m", tags, map[string]interface{}{"a": int64(2)}, now.Add(time.Second*-275)) - m3, _ := metric.New("m", + m3 := metric.New("m", tags, map[string]interface{}{"a": int64(3)}, now.Add(time.Second*-100)) - m4, _ := metric.New("m", + m4 := metric.New("m", tags, map[string]interface{}{"a": int64(4)}, now.Add(time.Second*-20)) diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index dfb3f5d12dfa8..c2a05cc283c3d 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -25,7 +25,7 @@ func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggreg } // firstMetric1 is the first test metric -var firstMetric1, _ = metric.New( +var firstMetric1 = metric.New( "first_metric_name", tags{}, fields{ @@ -36,7 +36,7 @@ var firstMetric1, _ = metric.New( ) // firstMetric1 is the first test metric with other value -var firstMetric2, _ = metric.New( +var firstMetric2 = metric.New( "first_metric_name", tags{}, fields{ @@ -47,7 +47,7 @@ var firstMetric2, _ = metric.New( ) // secondMetric is the second metric -var secondMetric, _ = metric.New( +var secondMetric = metric.New( "second_metric_name", tags{}, fields{ @@ -210,7 +210,6 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { // TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates // getting added in different periods) for all fields func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { - var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, true) diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go index 083c8fd3e6b0a..e11aad07a4b7d 100644 --- a/plugins/aggregators/merge/merge.go +++ b/plugins/aggregators/merge/merge.go @@ -19,7 +19,6 @@ const ( type Merge struct { grouper *metric.SeriesGrouper - log telegraf.Logger } func (a *Merge) Init() error { @@ -36,13 +35,7 @@ func (a *Merge) SampleConfig() string { } func (a *Merge) Add(m telegraf.Metric) { - tags := m.Tags() - for _, field := range m.FieldList() { - err := a.grouper.Add(m.Name(), tags, m.Time(), field.Key, field.Value) - if err != nil { - a.log.Errorf("Error adding metric: %v", err) - } - } + a.grouper.AddMetric(m) } func (a *Merge) Push(acc telegraf.Accumulator) { diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go index 2f2703c8f4b7c..94e54590b586f 100644 --- a/plugins/aggregators/merge/merge_test.go +++ b/plugins/aggregators/merge/merge_test.go @@ -4,9 +4,11 @@ import ( 
"testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSimple(t *testing.T) { @@ -184,3 +186,68 @@ func TestReset(t *testing.T) { testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } + +var m1 = metric.New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f1": 1, + "f2": 2, + "f3": 3, + "f4": 4, + "f5": 5, + "f6": 6, + "f7": 7, + "f8": 8, + }, + time.Now(), +) +var m2 = metric.New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f8": 8, + "f9": 9, + "f10": 10, + "f11": 11, + "f12": 12, + "f13": 13, + "f14": 14, + "f15": 15, + "f16": 16, + }, + m1.Time(), +) + +func BenchmarkMergeOne(b *testing.B) { + var merger Merge + merger.Init() + var acc testutil.NopAccumulator + + for n := 0; n < b.N; n++ { + merger.Reset() + merger.Add(m1) + merger.Push(&acc) + } +} + +func BenchmarkMergeTwo(b *testing.B) { + var merger Merge + merger.Init() + var acc testutil.NopAccumulator + + for n := 0; n < b.N; n++ { + merger.Reset() + merger.Add(m1) + merger.Add(m2) + merger.Push(&acc) + } +} diff --git a/plugins/aggregators/minmax/minmax_test.go b/plugins/aggregators/minmax/minmax_test.go index e7c3cf4eb2024..7835d95e9c72e 100644 --- a/plugins/aggregators/minmax/minmax_test.go +++ b/plugins/aggregators/minmax/minmax_test.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -24,7 +24,7 @@ var m1, _ = metric.New("m1", }, time.Now(), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), diff --git a/plugins/aggregators/quantile/README.md b/plugins/aggregators/quantile/README.md new file mode 100644 index 0000000000000..77d0f856409ec --- /dev/null +++ b/plugins/aggregators/quantile/README.md @@ -0,0 +1,127 @@ +# Quantile Aggregator Plugin + +The quantile aggregator plugin aggregates specified quantiles for each numeric field +per metric it sees and emits the quantiles every `period`. + +### Configuration + +```toml +[[aggregators.quantile]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## Quantiles to output in the range [0,1] + # quantiles = [0.25, 0.5, 0.75] + + ## Type of aggregation algorithm + ## Supported are: + ## "t-digest" -- approximation using centroids, can cope with large number of samples + ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) + ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) + ## NOTE: Do not use "exact" algorithms with large number of samples + ## to not impair performance or memory consumption! + # algorithm = "t-digest" + + ## Compression for approximation (t-digest). The value needs to be + ## greater or equal to 1.0. Smaller values will result in more + ## performance but less accuracy. 
+  # compression = 100.0
+```
+
+#### Algorithm types
+##### t-digest
+Proposed by [Dunning & Ertl (2019)][tdigest_paper], this type uses a
+special data structure to cluster data. These clusters are later used
+to approximate the requested quantiles. The bounds of the approximation
+can be controlled by the `compression` setting, where smaller values
+result in higher performance but less accuracy.
+
+Due to its incremental nature, this algorithm can handle large
+numbers of samples efficiently. It is recommended for applications
+where exact quantile calculation isn't required.
+
+For implementation details see the underlying [golang library][tdigest_lib].
+
+##### exact R7 and R8
+These algorithms compute quantiles as described in [Hyndman & Fan (1996)][hyndman_fan].
+The R7 variant is used in Excel and NumPy. The R8 variant is recommended
+by Hyndman & Fan due to its independence of the underlying sample distribution.
+
+These algorithms save all data for the aggregation `period`. They require
+a lot of memory when used with a large number of series or a
+large number of samples. They are also slower than the `t-digest`
+algorithm and are recommended only for a small number of samples and series.
+
+
+#### Benchmark (linux/amd64)
+The benchmark was performed by adding 100 metrics with six numeric
+(and two non-numeric) fields to the aggregator and then deriving the
+aggregation result.
+
+| algorithm     | # quantiles   | avg. runtime  |
+| :------------ | -------------:| -------------:|
+| t-digest      | 3             | 376372 ns/op  |
+| exact R7      | 3             | 9782946 ns/op |
+| exact R8      | 3             | 9158205 ns/op |
+| t-digest      | 100           | 899204 ns/op  |
+| exact R7      | 100           | 7868816 ns/op |
+| exact R8      | 100           | 8099612 ns/op |
+
+### Measurements
+Measurement names are passed through this aggregator.
+
+### Fields
+
+For all numeric fields (int32/64, uint32/64 and float32/64), new *quantile*
+fields are aggregated in the form `<fieldname>_<quantile*100>`. Other field
+types (e.g. boolean, string) are ignored and dropped from the output.
+
+For example, passing in the following metric as *input*:
+- somemetric
+  - average_response_ms (float64)
+  - minimum_response_ms (float64)
+  - maximum_response_ms (float64)
+  - status (string)
+  - ok (boolean)
+
+and using the default setting for `quantiles`, you get the following *output*:
+- somemetric
+  - average_response_ms_025 (float64)
+  - average_response_ms_050 (float64)
+  - average_response_ms_075 (float64)
+  - minimum_response_ms_025 (float64)
+  - minimum_response_ms_050 (float64)
+  - minimum_response_ms_075 (float64)
+  - maximum_response_ms_025 (float64)
+  - maximum_response_ms_050 (float64)
+  - maximum_response_ms_075 (float64)
+
+The `status` and `ok` fields are dropped because they are not numeric. Note that the
+number of resulting fields scales with the number of `quantiles` specified.
+
+### Tags
+
+Tags are passed through to the output by this aggregator.
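As a minimal illustration of the suffix scheme and the exact R7 rule (a standalone editor sketch, not part of the plugin; the names `xs`, `value` are illustrative), the following reproduces the values the plugin's own tests expect for 100 evenly spaced samples 0..99:

```go
package main

import "fmt"

// R7 (Hyndman & Fan 1996): n = q*(N-1); linearly interpolate between the
// two neighboring order statistics. Suffixes follow the plugin's
// fmt.Sprintf("_%03d", int(q*100)) rule.
func main() {
	xs := make([]float64, 100)
	for i := range xs {
		xs[i] = float64(i) // already sorted
	}
	for _, q := range []float64{0.25, 0.5, 0.75} {
		n := q * float64(len(xs)-1)
		j := int(n)
		gamma := n - float64(j)
		value := xs[j] + gamma*(xs[j+1]-xs[j])
		fmt.Printf("value%s = %.2f\n", fmt.Sprintf("_%03d", int(q*100)), value)
	}
}
```

Running it prints `value_025 = 24.75`, `value_050 = 49.50` and `value_075 = 74.25`, which are exactly the expectations in the `TestSingleMetricExactR7` test below.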
+ +### Example Output + +``` +cpu,cpu=cpu-total,host=Hugin usage_user=10.814851731872487,usage_system=2.1679541490155687,usage_irq=1.046598554697342,usage_steal=0,usage_guest_nice=0,usage_idle=85.79616247197244,usage_nice=0,usage_iowait=0,usage_softirq=0.1744330924495688,usage_guest=0 1608288360000000000 +cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_system=2.1601016518428664,usage_iowait=0.02541296060990694,usage_irq=1.0165184243964942,usage_softirq=0.1778907242693666,usage_steal=0,usage_guest_nice=0,usage_user=9.275730622616953,usage_idle=87.34434561626493,usage_nice=0 1608288370000000000 +cpu,cpu=cpu-total,host=Hugin usage_idle=85.78199052131747,usage_nice=0,usage_irq=1.0476428036915637,usage_guest=0,usage_guest_nice=0,usage_system=1.995510102269591,usage_iowait=0,usage_softirq=0.1995510102269662,usage_steal=0,usage_user=10.975305562484735 1608288380000000000 +cpu,cpu=cpu-total,host=Hugin usage_guest_nice_075=0,usage_user_050=10.814851731872487,usage_guest_075=0,usage_steal_025=0,usage_irq_025=1.031558489546918,usage_irq_075=1.0471206791944527,usage_iowait_025=0,usage_guest_050=0,usage_guest_nice_050=0,usage_nice_075=0,usage_iowait_050=0,usage_system_050=2.1601016518428664,usage_irq_050=1.046598554697342,usage_guest_nice_025=0,usage_idle_050=85.79616247197244,usage_softirq_075=0.1887208672481664,usage_steal_075=0,usage_system_025=2.0778058770562287,usage_system_075=2.1640279004292173,usage_softirq_050=0.1778907242693666,usage_nice_050=0,usage_iowait_075=0.01270648030495347,usage_user_075=10.895078647178611,usage_nice_025=0,usage_steal_050=0,usage_user_025=10.04529117724472,usage_idle_025=85.78907649664495,usage_idle_075=86.57025404411868,usage_softirq_025=0.1761619083594677,usage_guest_025=0 1608288390000000000 +``` + +# References +- Dunning & Ertl: "Computing Extremely Accurate Quantiles Using t-Digests", arXiv:1902.04023 (2019) [pdf][tdigest_paper] +- Hyndman & Fan: "Sample Quantiles in Statistical Packages", The American Statistician, vol. 50, pp. 
361-365 (1996) [pdf][hyndman_fan]
+
+
+[tdigest_paper]: https://arxiv.org/abs/1902.04023
+[tdigest_lib]: https://github.com/caio/go-tdigest
+[hyndman_fan]: http://www.maths.usyd.edu.au/u/UG/SM/STAT3022/r/current/Misc/Sample%20Quantiles%20in%20Statistical%20Packages.pdf
diff --git a/plugins/aggregators/quantile/algorithms.go b/plugins/aggregators/quantile/algorithms.go
new file mode 100644
index 0000000000000..641844f3f4e77
--- /dev/null
+++ b/plugins/aggregators/quantile/algorithms.go
@@ -0,0 +1,112 @@
+package quantile
+
+import (
+	"math"
+	"sort"
+
+	"github.com/caio/go-tdigest"
+)
+
+type algorithm interface {
+	Add(value float64) error
+	Quantile(q float64) float64
+}
+
+func newTDigest(compression float64) (algorithm, error) {
+	return tdigest.New(tdigest.Compression(compression))
+}
+
+type exactAlgorithmR7 struct {
+	xs     []float64
+	sorted bool
+}
+
+func newExactR7(_ float64) (algorithm, error) {
+	return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil
+}
+
+func (e *exactAlgorithmR7) Add(value float64) error {
+	e.xs = append(e.xs, value)
+	e.sorted = false
+
+	return nil
+}
+
+func (e *exactAlgorithmR7) Quantile(q float64) float64 {
+	size := len(e.xs)
+
+	// No information
+	if size == 0 {
+		return math.NaN()
+	}
+
+	// Sort the array if necessary
+	if !e.sorted {
+		sort.Float64s(e.xs)
+		e.sorted = true
+	}
+
+	// Get the quantile index and the fraction to the neighbor
+	// Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7
+	// Same as Excel and NumPy.
+	N := float64(size)
+	n := q * (N - 1)
+	i, gamma := math.Modf(n)
+	j := int(i)
+	if j < 0 {
+		return e.xs[0]
+	}
+	// Clamp at the top sample; this also prevents an out-of-range access to xs[j+1] for q = 1.0
+	if j >= size-1 {
+		return e.xs[size-1]
+	}
+	// Linear interpolation
+	return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j])
+}
+
+type exactAlgorithmR8 struct {
+	xs     []float64
+	sorted bool
+}
+
+func newExactR8(_ float64) (algorithm, error) {
+	return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil
+}
+
+func (e *exactAlgorithmR8) Add(value float64) error {
+	e.xs = append(e.xs, value)
+	e.sorted = false
+
+	return nil
+}
+
+func (e *exactAlgorithmR8) Quantile(q float64) float64 {
+	size := len(e.xs)
+
+	// No information
+	if size == 0 {
+		return math.NaN()
+	}
+
+	// Sort the array if necessary
+	if !e.sorted {
+		sort.Float64s(e.xs)
+		e.sorted = true
+	}
+
+	// Get the quantile index and the fraction to the neighbor
+	// Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8
+	N := float64(size)
+	n := q*(N+1.0/3.0) - (2.0 / 3.0) // Indices are zero-based here but one-based in the paper
+	i, gamma := math.Modf(n)
+	j := int(i)
+	if j < 0 {
+		return e.xs[0]
+	}
+	// Clamp at the top sample; this also prevents an out-of-range access to xs[j+1] for q close to 1.0
+	if j >= size-1 {
+		return e.xs[size-1]
+	}
+	// Linear interpolation
+	return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j])
+}
diff --git a/plugins/aggregators/quantile/quantile.go b/plugins/aggregators/quantile/quantile.go
new file mode 100644
index 0000000000000..cb58ef2e826d2
--- /dev/null
+++ b/plugins/aggregators/quantile/quantile.go
@@ -0,0 +1,165 @@
+package quantile
+
+import (
+	"fmt"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/aggregators"
+)
+
+type Quantile struct {
+	Quantiles     []float64 `toml:"quantiles"`
+	Compression   float64   `toml:"compression"`
+	AlgorithmType string    `toml:"algorithm"`
+
+	newAlgorithm newAlgorithmFunc
+
+	cache    map[uint64]aggregate
+	suffixes []string
+}
+
+type aggregate struct {
+	name   string
+	fields map[string]algorithm
+	tags   map[string]string
+}
+
+type newAlgorithmFunc func(compression float64) (algorithm, error)
+
+var sampleConfig = `
+  ## General Aggregator Arguments:
+  ## The period on which to flush & clear the aggregator.
+  period = "30s"
+
+  ## If true, the original metric will be dropped by the
+  ## aggregator and will not get sent to the output plugins.
+  drop_original = false
+
+  ## Quantiles to output in the range [0,1]
+  # quantiles = [0.25, 0.5, 0.75]
+
+  ## Type of aggregation algorithm
+  ## Supported are:
+  ##  "t-digest" -- approximation using centroids, can cope with a large number of samples
+  ##  "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
+  ##  "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
+  ## NOTE: Do not use "exact" algorithms with a large number of samples
+  ##       as this impairs performance and increases memory consumption!
+  # algorithm = "t-digest"
+
+  ## Compression for approximation (t-digest). The value needs to be
+  ## greater than or equal to 1.0. Smaller values will result in higher
+  ## performance but less accuracy.
+  # compression = 100.0
+`
+
+func (q *Quantile) SampleConfig() string {
+	return sampleConfig
+}
+
+func (q *Quantile) Description() string {
+	return "Keep the aggregate quantiles of each metric passing through."
+}
+
+func (q *Quantile) Add(in telegraf.Metric) {
+	id := in.HashID()
+	if cached, ok := q.cache[id]; ok {
+		fields := in.Fields()
+		for k, algo := range cached.fields {
+			if field, ok := fields[k]; ok {
+				if v, isconvertible := convert(field); isconvertible {
+					algo.Add(v)
+				}
+			}
+		}
+		return
+	}
+
+	// New metric, setup cache and init algorithm
+	a := aggregate{
+		name:   in.Name(),
+		tags:   in.Tags(),
+		fields: make(map[string]algorithm),
+	}
+	for k, field := range in.Fields() {
+		if v, isconvertible := convert(field); isconvertible {
+			// This should never error out as we tested it in Init()
+			algo, _ := q.newAlgorithm(q.Compression)
+			algo.Add(v)
+			a.fields[k] = algo
+		}
+	}
+	q.cache[id] = a
+}
+
+func (q *Quantile) Push(acc telegraf.Accumulator) {
+	for _, aggregate := range q.cache {
+		fields := map[string]interface{}{}
+		for k, algo := range aggregate.fields {
+			for i, qtl := range q.Quantiles {
+				fields[k+q.suffixes[i]] = algo.Quantile(qtl)
+			}
+		}
+		acc.AddFields(aggregate.name, fields, aggregate.tags)
+	}
+}
+
+func (q *Quantile) Reset() {
+	q.cache = make(map[uint64]aggregate)
+}
+
+func convert(in interface{}) (float64, bool) {
+	switch v := in.(type) {
+	case float64:
+		return v, true
+	case int64:
+		return float64(v), true
+	case uint64:
+		return float64(v), true
+	default:
+		return 0, false
+	}
+}
+
+func (q *Quantile) Init() error {
+	switch q.AlgorithmType {
+	case "t-digest", "":
+		q.newAlgorithm = newTDigest
+	case "exact R7":
+		q.newAlgorithm = newExactR7
+	case "exact R8":
+		q.newAlgorithm = newExactR8
+	default:
+		return fmt.Errorf("unknown algorithm type %q", q.AlgorithmType)
+	}
+	if _, err := q.newAlgorithm(q.Compression); err != nil {
+		return fmt.Errorf("cannot create %q algorithm: %v", q.AlgorithmType, err)
+	}
+
+	if len(q.Quantiles) == 0 {
+		q.Quantiles = []float64{0.25, 0.5, 0.75}
+	}
+
+	duplicates := make(map[float64]bool)
+	q.suffixes = make([]string, len(q.Quantiles))
+	for i, qtl := range q.Quantiles {
+		if qtl < 0.0 || qtl > 1.0 {
+			return fmt.Errorf("quantile %v out of range", qtl)
+		}
+		if _, found := duplicates[qtl]; found {
+			return fmt.Errorf("duplicate quantile %v", qtl)
+		}
+		duplicates[qtl] = true
+		q.suffixes[i] = fmt.Sprintf("_%03d", int(qtl*100.0))
+	}
+
+	q.Reset()
+
+	return nil
+}
+
+func init() {
+ aggregators.Add("quantile", func() telegraf.Aggregator { + return &Quantile{Compression: 100} + }) +} diff --git a/plugins/aggregators/quantile/quantile_test.go b/plugins/aggregators/quantile/quantile_test.go new file mode 100644 index 0000000000000..4095f0c5837be --- /dev/null +++ b/plugins/aggregators/quantile/quantile_test.go @@ -0,0 +1,635 @@ +package quantile + +import ( + "math/rand" + "testing" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConfigInvalidAlgorithm(t *testing.T) { + q := Quantile{AlgorithmType: "a strange one"} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "unknown algorithm type") +} + +func TestConfigInvalidCompression(t *testing.T) { + q := Quantile{Compression: 0, AlgorithmType: "t-digest"} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create \"t-digest\" algorithm") +} + +func TestConfigInvalidQuantiles(t *testing.T) { + q := Quantile{Compression: 100, Quantiles: []float64{-0.5}} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "quantile -0.5 out of range") + + q = Quantile{Compression: 100, Quantiles: []float64{1.5}} + err = q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "quantile 1.5 out of range") + + q = Quantile{Compression: 100, Quantiles: []float64{0.1, 0.2, 0.3, 0.1}} + err = q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "duplicate quantile") +} + +func TestSingleMetricTDigest(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.75, + "a_050": 49.50, + "a_075": 74.25, + "b_025": 24.75, + "b_050": 49.50, + "b_075": 74.25, + "c_025": 24.75, + "c_050": 49.50, + "c_075": 74.25, + "d_025": 24.75, + "d_050": 49.50, + "d_075": 74.25, + "e_025": 24.75, + "e_050": 49.50, + "e_075": 74.25, + "f_025": 24.75, + "f_050": 49.50, + "f_075": 74.25, + "g_025": 0.2475, + "g_050": 0.4950, + "g_075": 0.7425, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsTDigest(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.75, "a_050": 49.50, "a_075": 74.25, + "b_025": 24.75, "b_050": 49.50, "b_075": 74.25, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 49.50, "a_050": 99.00, "a_075": 148.50, + "b_025": 49.50, "b_050": 99.00, "b_075": 148.50, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 
100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func TestSingleMetricExactR7(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.75, + "a_050": 49.50, + "a_075": 74.25, + "b_025": 24.75, + "b_050": 49.50, + "b_075": 74.25, + "c_025": 24.75, + "c_050": 49.50, + "c_075": 74.25, + "d_025": 24.75, + "d_050": 49.50, + "d_075": 74.25, + "e_025": 24.75, + "e_050": 49.50, + "e_075": 74.25, + "f_025": 24.75, + "f_050": 49.50, + "f_075": 74.25, + "g_025": 0.2475, + "g_050": 0.4950, + "g_075": 0.7425, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsExactR7(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.75, "a_050": 49.50, "a_075": 74.25, + "b_025": 24.75, "b_050": 49.50, "b_075": 74.25, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 49.50, "a_050": 99.00, "a_075": 148.50, + "b_025": 49.50, "b_050": 99.00, "b_075": 148.50, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), 
testutil.IgnoreTime(), epsilon, sort) +} + +func TestSingleMetricExactR8(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.417, + "a_050": 49.500, + "a_075": 74.583, + "b_025": 24.417, + "b_050": 49.500, + "b_075": 74.583, + "c_025": 24.417, + "c_050": 49.500, + "c_075": 74.583, + "d_025": 24.417, + "d_050": 49.500, + "d_075": 74.583, + "e_025": 24.417, + "e_050": 49.500, + "e_075": 74.583, + "f_025": 24.417, + "f_050": 49.500, + "f_075": 74.583, + "g_025": 0.24417, + "g_050": 0.49500, + "g_075": 0.74583, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsExactR8(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.417, "a_050": 49.500, "a_075": 74.583, + "b_025": 24.417, "b_050": 49.500, "b_075": 74.583, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 48.833, "a_050": 99.000, "a_075": 149.167, + "b_025": 48.833, "b_050": 99.000, "b_075": 149.167, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func BenchmarkDefaultTDigest(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultTDigest100Q(b *testing.B) { + metrics := 
make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{Compression: 100, Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR7(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR7100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{AlgorithmType: "exact R7", Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR8(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR8100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{AlgorithmType: "exact R8", Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} diff --git a/plugins/aggregators/valuecounter/valuecounter_test.go 
b/plugins/aggregators/valuecounter/valuecounter_test.go index 8cec5f36653c4..75aa6deb01bf4 100644 --- a/plugins/aggregators/valuecounter/valuecounter_test.go +++ b/plugins/aggregators/valuecounter/valuecounter_test.go @@ -19,7 +19,7 @@ func NewTestValueCounter(fields []string) telegraf.Aggregator { return vc } -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "status": 200, @@ -28,7 +28,7 @@ var m1, _ = metric.New("m1", time.Now(), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "status": "OK", diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go new file mode 100644 index 0000000000000..03fd97f95077f --- /dev/null +++ b/plugins/common/cookie/cookie.go @@ -0,0 +1,111 @@ +package cookie + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/cookiejar" + "strings" + "sync" + "time" + + clockutil "github.com/benbjohnson/clock" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" +) + +type CookieAuthConfig struct { + URL string `toml:"cookie_auth_url"` + Method string `toml:"cookie_auth_method"` + + // HTTP Basic Auth Credentials + Username string `toml:"cookie_auth_username"` + Password string `toml:"cookie_auth_password"` + + Body string `toml:"cookie_auth_body"` + Renewal config.Duration `toml:"cookie_auth_renewal"` + + client *http.Client + wg sync.WaitGroup +} + +func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock clockutil.Clock) (err error) { + if err = c.initializeClient(client); err != nil { + return err + } + + // continual auth renewal if set + if c.Renewal > 0 { + ticker := clock.Ticker(time.Duration(c.Renewal)) + // this context is used in the tests only, it is to cancel the goroutine + go c.authRenewal(context.Background(), ticker, log) + } + + return nil +} + +func (c *CookieAuthConfig) initializeClient(client *http.Client) (err error) { + c.client = client + + if c.Method == "" { + c.Method = http.MethodPost + } + + // add cookie jar to HTTP client + if c.client.Jar, err = cookiejar.New(nil); err != nil { + return err + } + + return c.auth() +} + +func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ticker, log telegraf.Logger) { + for { + select { + case <-ctx.Done(): + c.wg.Done() + return + case <-ticker.C: + if err := c.auth(); err != nil && log != nil { + log.Errorf("renewal failed for %q: %v", c.URL, err) + } + } + } +} + +func (c *CookieAuthConfig) auth() error { + var body io.ReadCloser + if c.Body != "" { + body = io.NopCloser(strings.NewReader(c.Body)) + defer body.Close() + } + + req, err := http.NewRequest(c.Method, c.URL, body) + if err != nil { + return err + } + + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err = io.Copy(io.Discard, resp.Body); err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("cookie auth renewal received status code: %v (%v)", + resp.StatusCode, + http.StatusText(resp.StatusCode), + ) + } + + return nil +} diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go new file mode 100644 index 0000000000000..b32ceb0059e8b --- /dev/null +++ b/plugins/common/cookie/cookie_test.go @@ -0,0 +1,243 @@ +package cookie + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "sync/atomic" + 
"testing" + "time" + + clockutil "github.com/benbjohnson/clock" + "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const ( + reqUser = "testUser" + reqPasswd = "testPassword" + reqBody = "a body" + + authEndpointNoCreds = "/auth" + authEndpointWithBasicAuth = "/authWithCreds" + authEndpointWithBasicAuthOnlyUsername = "/authWithCredsUser" + authEndpointWithBody = "/authWithBody" +) + +var fakeCookie = &http.Cookie{ + Name: "test-cookie", + Value: "this is an auth cookie", +} + +type fakeServer struct { + *httptest.Server + *int32 +} + +func newFakeServer(t *testing.T) fakeServer { + var c int32 + return fakeServer{ + Server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + authed := func() { + atomic.AddInt32(&c, 1) // increment auth counter + http.SetCookie(w, fakeCookie) // set fake cookie + } + switch r.URL.Path { + case authEndpointNoCreds: + authed() + case authEndpointWithBody: + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + if !cmp.Equal([]byte(reqBody), body) { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBasicAuth: + u, p, ok := r.BasicAuth() + if !ok || u != reqUser || p != reqPasswd { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBasicAuthOnlyUsername: + u, p, ok := r.BasicAuth() + if !ok || u != reqUser || p != "" { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + default: + // ensure cookie exists on request + if _, err := r.Cookie(fakeCookie.Name); err != nil { + w.WriteHeader(http.StatusForbidden) + return + } + _, _ = w.Write([]byte("good test response")) + } + })), + int32: &c, + } +} + +func (s fakeServer) checkResp(t *testing.T, expCode int) { + t.Helper() + resp, err := s.Client().Get(s.URL + "/endpoint") + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, expCode, resp.StatusCode) + + if expCode == http.StatusOK { + require.Len(t, resp.Request.Cookies(), 1) + require.Equal(t, "test-cookie", resp.Request.Cookies()[0].Name) + } +} + +func (s fakeServer) checkAuthCount(t *testing.T, atLeast int32) { + t.Helper() + require.GreaterOrEqual(t, atomic.LoadInt32(s.int32), atLeast) +} + +func TestAuthConfig_Start(t *testing.T) { + const ( + renewal = 50 * time.Millisecond + renewalCheck = 5 * renewal + ) + type fields struct { + Method string + Username string + Password string + Body string + } + type args struct { + renewal time.Duration + endpoint string + } + tests := []struct { + name string + fields fields + args args + wantErr error + firstAuthCount int32 + lastAuthCount int32 + firstHTTPResponse int + lastHTTPResponse int + }{ + { + name: "success no creds, no body, default method", + args: args{ + renewal: renewal, + endpoint: authEndpointNoCreds, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "success with creds, no body", + fields: fields{ + Method: http.MethodPost, + Username: reqUser, + Password: reqPasswd, + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBasicAuth, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "failure with bad creds", + fields: fields{ + Method: http.MethodPost, + Username: reqUser, + Password: "a bad password", + }, + args: args{ + renewal: renewal, + endpoint: 
authEndpointWithBasicAuth, + }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, + }, + { + name: "success with no creds, with good body", + fields: fields{ + Method: http.MethodPost, + Body: reqBody, + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBody, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "failure with bad body", + fields: fields{ + Method: http.MethodPost, + Body: "a bad body", + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBody, + }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + srv := newFakeServer(t) + c := &CookieAuthConfig{ + URL: srv.URL + tt.args.endpoint, + Method: tt.fields.Method, + Username: tt.fields.Username, + Password: tt.fields.Password, + Body: tt.fields.Body, + Renewal: config.Duration(tt.args.renewal), + } + if err := c.initializeClient(srv.Client()); tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + } + mock := clockutil.NewMock() + ticker := mock.Ticker(time.Duration(c.Renewal)) + defer ticker.Stop() + + c.wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + go c.authRenewal(ctx, ticker, testutil.Logger{Name: "cookie_auth"}) + + srv.checkAuthCount(t, tt.firstAuthCount) + srv.checkResp(t, tt.firstHTTPResponse) + mock.Add(renewalCheck) + // Ensure that the auth renewal goroutine has completed + cancel() + c.wg.Wait() + srv.checkAuthCount(t, tt.lastAuthCount) + srv.checkResp(t, tt.lastHTTPResponse) + + srv.Close() + }) + } +} diff --git a/plugins/common/encoding/decoder_test.go b/plugins/common/encoding/decoder_test.go index 87115318ad0ed..b8e19af9cea43 100644 --- a/plugins/common/encoding/decoder_test.go +++ b/plugins/common/encoding/decoder_test.go @@ -2,7 +2,7 @@ package encoding import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -66,7 +66,7 @@ func TestDecoder(t *testing.T) { require.NoError(t, err) buf := bytes.NewBuffer(tt.input) r := decoder.Reader(buf) - actual, err := ioutil.ReadAll(r) + actual, err := io.ReadAll(r) if tt.expectedErr { require.Error(t, err) return diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go new file mode 100644 index 0000000000000..bd6ce4fefa308 --- /dev/null +++ b/plugins/common/http/config.go @@ -0,0 +1,64 @@ +package httpconfig + +import ( + "context" + "net/http" + "time" + + "github.com/benbjohnson/clock" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/cookie" + oauthConfig "github.com/influxdata/telegraf/plugins/common/oauth" + "github.com/influxdata/telegraf/plugins/common/proxy" + "github.com/influxdata/telegraf/plugins/common/tls" +) + +// Common HTTP client struct. 
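// A hypothetical usage sketch (editor illustration, not part of this change;
// MyPlugin and its fields are invented names): a plugin would typically embed
// HTTPClientConfig next to its own options and build its client once during
// initialization. Passing a nil logger is tolerated, as the cookie-auth
// renewal loop nil-checks it before logging.
//
//	type MyPlugin struct {
//		URL string `toml:"url"`
//		httpconfig.HTTPClientConfig
//		client *http.Client
//	}
//
//	func (p *MyPlugin) Init() error {
//		client, err := p.HTTPClientConfig.CreateClient(context.Background(), nil)
//		if err != nil {
//			return err
//		}
//		p.client = client
//		return nil
//	}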
+type HTTPClientConfig struct { + Timeout config.Duration `toml:"timeout"` + IdleConnTimeout config.Duration `toml:"idle_conn_timeout"` + + proxy.HTTPProxy + tls.ClientConfig + oauthConfig.OAuth2Config + cookie.CookieAuthConfig +} + +func (h *HTTPClientConfig) CreateClient(ctx context.Context, log telegraf.Logger) (*http.Client, error) { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + prox, err := h.HTTPProxy.Proxy() + if err != nil { + return nil, err + } + + transport := &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: prox, + IdleConnTimeout: time.Duration(h.IdleConnTimeout), + } + + timeout := h.Timeout + if timeout == 0 { + timeout = config.Duration(time.Second * 5) + } + + client := &http.Client{ + Transport: transport, + Timeout: time.Duration(timeout), + } + + client = h.OAuth2Config.CreateOauth2Client(ctx, client) + + if h.CookieAuthConfig.URL != "" { + if err := h.CookieAuthConfig.Start(client, log, clock.New()); err != nil { + return nil, err + } + } + + return client, nil +} diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go new file mode 100644 index 0000000000000..56e70a26b4a95 --- /dev/null +++ b/plugins/common/kafka/config.go @@ -0,0 +1,93 @@ +package kafka + +import ( + "log" + + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf/plugins/common/tls" +) + +// ReadConfig for kafka clients meaning to read from Kafka. +type ReadConfig struct { + Config +} + +// SetConfig on the sarama.Config object from the ReadConfig struct. +func (k *ReadConfig) SetConfig(config *sarama.Config) error { + config.Consumer.Return.Errors = true + + return k.Config.SetConfig(config) +} + +// WriteConfig for kafka clients meaning to write to kafka +type WriteConfig struct { + Config + + RequiredAcks int `toml:"required_acks"` + MaxRetry int `toml:"max_retry"` + MaxMessageBytes int `toml:"max_message_bytes"` + IdempotentWrites bool `toml:"idempotent_writes"` +} + +// SetConfig on the sarama.Config object from the WriteConfig struct. +func (k *WriteConfig) SetConfig(config *sarama.Config) error { + config.Producer.Return.Successes = true + config.Producer.Idempotent = k.IdempotentWrites + config.Producer.Retry.Max = k.MaxRetry + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + if config.Producer.Idempotent { + config.Net.MaxOpenRequests = 1 + } + return k.Config.SetConfig(config) +} + +// Config common to all Kafka clients. +type Config struct { + SASLAuth + tls.ClientConfig + + Version string `toml:"version"` + ClientID string `toml:"client_id"` + CompressionCodec int `toml:"compression_codec"` + + // EnableTLS deprecated + EnableTLS *bool `toml:"enable_tls"` +} + +// SetConfig on the sarama.Config object from the Config struct. +func (k *Config) SetConfig(config *sarama.Config) error { + if k.EnableTLS != nil { + log.Printf("W! 
[kafka] enable_tls is deprecated, and the setting does nothing, you can safely remove it from the config") + } + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + + config.Version = version + } + + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" + } + + config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) + + tlsConfig, err := k.ClientConfig.TLSConfig() + if err != nil { + return err + } + + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + return k.SetSASLConfig(config) +} diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go index cd3358b3833ec..06ab64dab34be 100644 --- a/plugins/common/kafka/sasl.go +++ b/plugins/common/kafka/sasl.go @@ -6,6 +6,78 @@ import ( "github.com/Shopify/sarama" ) +type SASLAuth struct { + SASLUsername string `toml:"sasl_username"` + SASLPassword string `toml:"sasl_password"` + SASLMechanism string `toml:"sasl_mechanism"` + SASLVersion *int `toml:"sasl_version"` + + // GSSAPI config + SASLGSSAPIServiceName string `toml:"sasl_gssapi_service_name"` + SASLGSSAPIAuthType string `toml:"sasl_gssapi_auth_type"` + SASLGSSAPIDisablePAFXFAST bool `toml:"sasl_gssapi_disable_pafxfast"` + SASLGSSAPIKerberosConfigPath string `toml:"sasl_gssapi_kerberos_config_path"` + SASLGSSAPIKeyTabPath string `toml:"sasl_gssapi_key_tab_path"` + SASLGSSAPIRealm string `toml:"sasl_gssapi_realm"` + + // OAUTHBEARER config. experimental. undoubtedly this is not good enough. + SASLAccessToken string `toml:"sasl_access_token"` +} + +// SetSASLConfig configures SASL for kafka (sarama) +func (k *SASLAuth) SetSASLConfig(config *sarama.Config) error { + config.Net.SASL.User = k.SASLUsername + config.Net.SASL.Password = k.SASLPassword + + if k.SASLMechanism != "" { + config.Net.SASL.Mechanism = sarama.SASLMechanism(k.SASLMechanism) + switch config.Net.SASL.Mechanism { + case sarama.SASLTypeSCRAMSHA256: + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA256} + } + case sarama.SASLTypeSCRAMSHA512: + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA512} + } + case sarama.SASLTypeOAuth: + config.Net.SASL.TokenProvider = k // use self as token provider. + case sarama.SASLTypeGSSAPI: + config.Net.SASL.GSSAPI.ServiceName = k.SASLGSSAPIServiceName + config.Net.SASL.GSSAPI.AuthType = gssapiAuthType(k.SASLGSSAPIAuthType) + config.Net.SASL.GSSAPI.Username = k.SASLUsername + config.Net.SASL.GSSAPI.Password = k.SASLPassword + config.Net.SASL.GSSAPI.DisablePAFXFAST = k.SASLGSSAPIDisablePAFXFAST + config.Net.SASL.GSSAPI.KerberosConfigPath = k.SASLGSSAPIKerberosConfigPath + config.Net.SASL.GSSAPI.KeyTabPath = k.SASLGSSAPIKeyTabPath + config.Net.SASL.GSSAPI.Realm = k.SASLGSSAPIRealm + + case sarama.SASLTypePlaintext: + // nothing. + default: + } + } + + if k.SASLUsername != "" || k.SASLMechanism != "" { + config.Net.SASL.Enable = true + + version, err := SASLVersion(config.Version, k.SASLVersion) + if err != nil { + return err + } + config.Net.SASL.Version = version + } + return nil +} + +// Token does nothing smart, it just grabs a hard-coded token from config. 
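// It exists so that SASLAuth satisfies sarama's AccessTokenProvider
// interface, which is why SetSASLConfig above can register the struct itself
// via config.Net.SASL.TokenProvider = k for the OAUTHBEARER mechanism.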
+func (k *SASLAuth) Token() (*sarama.AccessToken, error) { + return &sarama.AccessToken{ + Token: k.SASLAccessToken, + Extensions: map[string]string{}, + }, nil +} + func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, error) { if saslVersion == nil { if kafkaVersion.IsAtLeast(sarama.V1_0_0_0) { @@ -23,3 +95,14 @@ func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, err return 0, errors.New("invalid SASL version") } } + +func gssapiAuthType(authType string) int { + switch authType { + case "KRB5_USER_AUTH": + return sarama.KRB5_USER_AUTH + case "KRB5_KEYTAB_AUTH": + return sarama.KRB5_KEYTAB_AUTH + default: + return 0 + } +} diff --git a/plugins/common/kafka/scram_client.go b/plugins/common/kafka/scram_client.go new file mode 100644 index 0000000000000..f6aa9d6c4e285 --- /dev/null +++ b/plugins/common/kafka/scram_client.go @@ -0,0 +1,36 @@ +package kafka + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" + + "github.com/xdg/scram" +) + +var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } +var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } + +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index a7f99023be1ba..7451639a75423 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -1,7 +1,7 @@ package logrus import ( - "io/ioutil" + "io" "log" "strings" "sync" @@ -19,7 +19,7 @@ type LogHook struct { // that directly log to the logrus system without providing an override method. 
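// Note: io.Discard is the direct replacement for ioutil.Discard, which was
// deprecated along with the rest of io/ioutil in Go 1.16; behavior is unchanged.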
func InstallHook() { once.Do(func() { - logrus.SetOutput(ioutil.Discard) + logrus.SetOutput(io.Discard) logrus.AddHook(&LogHook{}) }) } diff --git a/plugins/common/oauth/config.go b/plugins/common/oauth/config.go new file mode 100644 index 0000000000000..aa42a7a65569a --- /dev/null +++ b/plugins/common/oauth/config.go @@ -0,0 +1,32 @@ +package oauth + +import ( + "context" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +type OAuth2Config struct { + // OAuth2 Credentials + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` +} + +func (o *OAuth2Config) CreateOauth2Client(ctx context.Context, client *http.Client) *http.Client { + if o.ClientID != "" && o.ClientSecret != "" && o.TokenURL != "" { + oauthConfig := clientcredentials.Config{ + ClientID: o.ClientID, + ClientSecret: o.ClientSecret, + TokenURL: o.TokenURL, + Scopes: o.Scopes, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, client) + client = oauthConfig.Client(ctx) + } + + return client +} diff --git a/plugins/processors/reverse_dns/parallel/ordered.go b/plugins/common/parallel/ordered.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/ordered.go rename to plugins/common/parallel/ordered.go diff --git a/plugins/processors/reverse_dns/parallel/parallel.go b/plugins/common/parallel/parallel.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/parallel.go rename to plugins/common/parallel/parallel.go diff --git a/plugins/processors/reverse_dns/parallel/parallel_test.go b/plugins/common/parallel/parallel_test.go similarity index 89% rename from plugins/processors/reverse_dns/parallel/parallel_test.go rename to plugins/common/parallel/parallel_test.go index 0d2839a24f4cd..1e2eaccb98654 100644 --- a/plugins/processors/reverse_dns/parallel/parallel_test.go +++ b/plugins/common/parallel/parallel_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" + "github.com/influxdata/telegraf/plugins/common/parallel" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -18,14 +18,13 @@ func TestOrderedJobsStayOrdered(t *testing.T) { p := parallel.NewOrdered(acc, jobFunc, 10000, 10) now := time.Now() for i := 0; i < 20000; i++ { - m, err := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": i, }, now, ) - require.NoError(t, err) now = now.Add(1) p.Enqueue(m) } @@ -51,14 +50,13 @@ func TestUnorderedJobsDontDropAnyJobs(t *testing.T) { expectedTotal := 0 for i := 0; i < 20000; i++ { expectedTotal += i - m, err := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": i, }, now, ) - require.NoError(t, err) now = now.Add(1) p.Enqueue(m) } @@ -79,7 +77,7 @@ func BenchmarkOrdered(b *testing.B) { p := parallel.NewOrdered(acc, jobFunc, 10000, 10) - m, _ := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": 1, @@ -99,7 +97,7 @@ func BenchmarkUnordered(b *testing.B) { p := parallel.NewUnordered(acc, jobFunc, 10) - m, _ := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": 1, diff --git a/plugins/processors/reverse_dns/parallel/unordered.go b/plugins/common/parallel/unordered.go similarity index 100% rename from 
plugins/processors/reverse_dns/parallel/unordered.go rename to plugins/common/parallel/unordered.go diff --git a/plugins/common/proxy/proxy.go b/plugins/common/proxy/proxy.go new file mode 100644 index 0000000000000..4ef97f1eb52e8 --- /dev/null +++ b/plugins/common/proxy/proxy.go @@ -0,0 +1,24 @@ +package proxy + +import ( + "fmt" + "net/http" + "net/url" +) + +type HTTPProxy struct { + HTTPProxyURL string `toml:"http_proxy_url"` +} + +type proxyFunc func(req *http.Request) (*url.URL, error) + +func (p *HTTPProxy) Proxy() (proxyFunc, error) { + if len(p.HTTPProxyURL) > 0 { + url, err := url.Parse(p.HTTPProxyURL) + if err != nil { + return nil, fmt.Errorf("error parsing proxy url %q: %w", p.HTTPProxyURL, err) + } + return http.ProxyURL(url), nil + } + return http.ProxyFromEnvironment, nil +} diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index d5d1910964e7c..089c2b7ee7525 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -3,7 +3,6 @@ package shim import ( "errors" "fmt" - "io/ioutil" "log" "os" @@ -34,15 +33,15 @@ func (s *Shim) LoadConfig(filePath *string) error { } if conf.Input != nil { if err = s.AddInput(conf.Input); err != nil { - return fmt.Errorf("Failed to add Input: %w", err) + return fmt.Errorf("failed to add Input: %w", err) } } else if conf.Processor != nil { if err = s.AddStreamingProcessor(conf.Processor); err != nil { - return fmt.Errorf("Failed to add Processor: %w", err) + return fmt.Errorf("failed to add Processor: %w", err) } } else if conf.Output != nil { if err = s.AddOutput(conf.Output); err != nil { - return fmt.Errorf("Failed to add Output: %w", err) + return fmt.Errorf("failed to add Output: %w", err) } } return nil @@ -53,14 +52,12 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) { var data string conf := config{} if filePath != nil && *filePath != "" { - - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return loadedConfig{}, err } data = expandEnvVars(b) - } else { conf, err = DefaultImportedPlugins() if err != nil { @@ -116,7 +113,11 @@ func createPluginsWithTomlConfig(md toml.MetaData, conf config) (loadedConfig, e plugin := creator() if len(primitives) > 0 { primitive := primitives[0] - if err := md.PrimitiveDecode(primitive, plugin); err != nil { + var p telegraf.PluginDescriber = plugin + if processor, ok := plugin.(unwrappable); ok { + p = processor.Unwrap() + } + if err := md.PrimitiveDecode(primitive, p); err != nil { return loadedConf, err } } @@ -169,3 +170,7 @@ func DefaultImportedPlugins() (config, error) { } return conf, nil } + +type unwrappable interface { + Unwrap() telegraf.Processor +} diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index be4ee4140feb5..762ca5dd283b2 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf" tgConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/processors" "github.com/stretchr/testify/require" ) @@ -53,11 +54,26 @@ func TestLoadingSpecialTypes(t *testing.T) { require.EqualValues(t, 3*time.Second, inp.Duration) require.EqualValues(t, 3*1000*1000, inp.Size) + require.EqualValues(t, 52, inp.Hex) +} + +func TestLoadingProcessorWithConfig(t *testing.T) { + proc := &testConfigProcessor{} + processors.Add("test_config_load", func() telegraf.Processor { + return proc + }) + + c := 
"./testdata/processor.conf" + _, err := LoadConfig(&c) + require.NoError(t, err) + + require.EqualValues(t, "yep", proc.Loaded) } type testDurationInput struct { Duration tgConfig.Duration `toml:"duration"` Size tgConfig.Size `toml:"size"` + Hex int64 `toml:"hex"` } func (i *testDurationInput) SampleConfig() string { @@ -67,6 +83,21 @@ func (i *testDurationInput) SampleConfig() string { func (i *testDurationInput) Description() string { return "" } -func (i *testDurationInput) Gather(acc telegraf.Accumulator) error { +func (i *testDurationInput) Gather(_ telegraf.Accumulator) error { return nil } + +type testConfigProcessor struct { + Loaded string `toml:"loaded"` +} + +func (p *testConfigProcessor) SampleConfig() string { + return "" +} + +func (p *testConfigProcessor) Description() string { + return "" +} +func (p *testConfigProcessor) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + return metrics +} diff --git a/plugins/common/shim/example/cmd/main.go b/plugins/common/shim/example/cmd/main.go index 4f51f7f878fb3..ddabaa5da2a81 100644 --- a/plugins/common/shim/example/cmd/main.go +++ b/plugins/common/shim/example/cmd/main.go @@ -13,7 +13,7 @@ import ( ) var pollInterval = flag.Duration("poll_interval", 1*time.Second, "how often to send metrics") -var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "how often to send metrics") +var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "set to true to disable polling. You want to use this when you are sending metrics on your own schedule") var configFile = flag.String("config", "", "path to the config file for this plugin") var err error @@ -30,7 +30,7 @@ var err error // // shim.AddInput(myInput) // -// // now the shim.Run() call as below. +// // now the shim.Run() call as below. Note the shim is only intended to run a single plugin. 
// func main() { // parse command line options @@ -52,7 +52,7 @@ func main() { os.Exit(1) } - // run the input plugin(s) until stdin closes or we receive a termination signal + // run a single plugin until stdin closes or we receive a termination signal if err := shim.Run(*pollInterval); err != nil { fmt.Fprintf(os.Stderr, "Err: %s\n", err) os.Exit(1) diff --git a/plugins/common/shim/goshim_test.go b/plugins/common/shim/goshim_test.go index 080a513ade250..bbd1a0b703cc5 100644 --- a/plugins/common/shim/goshim_test.go +++ b/plugins/common/shim/goshim_test.go @@ -71,7 +71,7 @@ func (i *erroringInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *erroringInput) Start(acc telegraf.Accumulator) error { +func (i *erroringInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 32f97d5924bc5..9a0423261ac14 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "strings" "testing" "time" @@ -45,7 +44,9 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) stdinWriter.Close() - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() // check that it exits cleanly <-exited } @@ -100,7 +101,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(acc telegraf.Accumulator) error { +func (i *testInput) Start(_ telegraf.Accumulator) error { return nil } @@ -133,7 +134,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(acc telegraf.Accumulator) error { +func (i *serviceInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/common/shim/logger.go b/plugins/common/shim/logger.go index 88db63ab7d58c..c8a6ee12ba350 100644 --- a/plugins/common/shim/logger.go +++ b/plugins/common/shim/logger.go @@ -84,6 +84,4 @@ func setLoggerOnPlugin(i interface{}, log telegraf.Logger) { field.Set(reflect.ValueOf(log)) } } - - return } diff --git a/plugins/common/shim/output_test.go b/plugins/common/shim/output_test.go index 5a74d59edb240..468ae28e05eee 100644 --- a/plugins/common/shim/output_test.go +++ b/plugins/common/shim/output_test.go @@ -34,7 +34,7 @@ func TestOutputShim(t *testing.T) { serializer, _ := serializers.NewInfluxSerializer() - m, _ := metric.New("thing", + m := metric.New("thing", map[string]string{ "a": "b", }, diff --git a/plugins/common/shim/processor.go b/plugins/common/shim/processor.go index 33dceba872759..d8f660b360cd6 100644 --- a/plugins/common/shim/processor.go +++ b/plugins/common/shim/processor.go @@ -1,14 +1,13 @@ package shim import ( - "bufio" "fmt" "sync" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" - "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" ) @@ -37,12 +36,7 @@ func (s *Shim) RunProcessor() error { acc := agent.NewAccumulator(s, s.metricCh) acc.SetPrecision(time.Nanosecond) - parser, err := parsers.NewInfluxParser() - if err != nil { - return fmt.Errorf("Failed to create new parser: %w", err) - } - - err = s.Processor.Start(acc) + err := s.Processor.Start(acc) if err != nil { return fmt.Errorf("failed to start processor: %w", err) } @@ -54,13 +48,21 @@ func (s *Shim) RunProcessor() error { wg.Done() }() - scanner := 
bufio.NewScanner(s.stdin) - for scanner.Scan() { - m, err := parser.ParseLine(scanner.Text()) + parser := influx.NewStreamParser(s.stdin) + for { + m, err := parser.Next() if err != nil { - fmt.Fprintf(s.stderr, "Failed to parse metric: %s\b", err) + if err == influx.EOF { + break // stream ended + } + if parseErr, isParseError := err.(*influx.ParseError); isParseError { + fmt.Fprintf(s.stderr, "Failed to parse metric: %s\n", parseErr) + continue + } + fmt.Fprintf(s.stderr, "Failure during reading stdin: %s\n", err) continue } + s.Processor.Add(m, acc) } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index b4cf01ae0236f..bc00fb70d1bba 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -3,7 +3,7 @@ package shim import ( "bufio" "io" - "io/ioutil" + "math/rand" "sync" "testing" "time" @@ -16,7 +16,21 @@ import ( ) func TestProcessorShim(t *testing.T) { - p := &testProcessor{} + testSendAndReceive(t, "f1", "fv1") +} + +func TestProcessorShimWithLargerThanDefaultScannerBufferSize(t *testing.T) { + letters := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]rune, bufio.MaxScanTokenSize*2) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + + testSendAndReceive(t, "f1", string(b)) +} + +func testSendAndReceive(t *testing.T, fieldKey string, fieldValue string) { + p := &testProcessor{"hi", "mom"} stdinReader, stdinWriter := io.Pipe() stdoutReader, stdoutWriter := io.Pipe() @@ -40,12 +54,13 @@ func TestProcessorShim(t *testing.T) { serializer, _ := serializers.NewInfluxSerializer() parser, _ := parsers.NewInfluxParser() - m, _ := metric.New("thing", + m := metric.New("thing", map[string]string{ "a": "b", }, map[string]interface{}{ - "v": 1, + "v": 1, + fieldKey: fieldValue, }, time.Now(), ) @@ -62,19 +77,26 @@ func TestProcessorShim(t *testing.T) { mOut, err := parser.ParseLine(out) require.NoError(t, err) - val, ok := mOut.GetTag("hi") + val, ok := mOut.GetTag(p.tagName) require.True(t, ok) - require.Equal(t, "mom", val) - - go ioutil.ReadAll(r) + require.Equal(t, p.tagValue, val) + val2, ok := mOut.Fields()[fieldKey] + require.True(t, ok) + require.Equal(t, fieldValue, val2) + go func() { + _, _ = io.ReadAll(r) + }() wg.Wait() } -type testProcessor struct{} +type testProcessor struct { + tagName string + tagValue string +} func (p *testProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, metric := range in { - metric.AddTag("hi", "mom") + metric.AddTag(p.tagName, p.tagValue) } return in } diff --git a/plugins/common/shim/testdata/processor.conf b/plugins/common/shim/testdata/processor.conf new file mode 100644 index 0000000000000..d45cc659d75a2 --- /dev/null +++ b/plugins/common/shim/testdata/processor.conf @@ -0,0 +1,2 @@ +[[processors.test_config_load]] + loaded = "yep" \ No newline at end of file diff --git a/plugins/common/shim/testdata/special.conf b/plugins/common/shim/testdata/special.conf index c324b638497c5..53af78620701d 100644 --- a/plugins/common/shim/testdata/special.conf +++ b/plugins/common/shim/testdata/special.conf @@ -1,4 +1,5 @@ # testing custom field types [[inputs.test]] duration = "3s" - size = "3MB" \ No newline at end of file + size = "3MB" + hex = 0x34 \ No newline at end of file diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 59fbc49526745..586ec8fd4a417 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "strings" 
) @@ -14,6 +14,7 @@ type ClientConfig struct { TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` InsecureSkipVerify bool `toml:"insecure_skip_verify"` + ServerName string `toml:"tls_server_name"` // Deprecated in 1.7; use TLS variables above SSLCA string `toml:"ssl_ca"` @@ -45,11 +46,14 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { c.TLSKey = c.SSLKey } - // TODO: return default tls.Config; plugins should not call if they don't - // want TLS, this will require using another option to determine. In the - // case of an HTTP plugin, you could use `https`. Other plugins may need - // the dedicated option `TLSEnable`. - if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify { + // This check returns a nil (aka, "use the default") + // tls.Config if no field is set that would have an effect on + // a TLS connection. That is, any of: + // * client certificate settings, + // * peer certificate authorities, + // * disabled security, or + // * an SNI server name. + if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify && c.ServerName == "" { return nil, nil } @@ -73,6 +77,10 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { } } + if c.ServerName != "" { + tlsConfig.ServerName = c.ServerName + } + return tlsConfig, nil } @@ -139,7 +147,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { func makeCertPool(certFiles []string) (*x509.CertPool, error) { pool := x509.NewCertPool() for _, certFile := range certFiles { - pem, err := ioutil.ReadFile(certFile) + pem, err := os.ReadFile(certFile) if err != nil { return nil, fmt.Errorf( "could not read certificate %q: %v", certFile, err) diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index 93656087dfd55..2784ace6920e3 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -86,6 +86,14 @@ func TestClientConfig(t *testing.T) { SSLKey: pki.ClientKeyPath(), }, }, + { + name: "set SNI server name", + client: tls.ClientConfig{ + ServerName: "foo.example.com", + }, + expNil: false, + expErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index f7847f83d8d04..f5cf7927342e5 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -3,7 +3,7 @@ package activemq import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -12,19 +12,19 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type ActiveMQ struct { - Server string `toml:"server"` - Port int `toml:"port"` - URL string `toml:"url"` - Username string `toml:"username"` - Password string `toml:"password"` - Webadmin string `toml:"webadmin"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Server string `toml:"server"` + Port int `toml:"port"` + URL string `toml:"url"` + Username string `toml:"username"` + Password string `toml:"password"` + Webadmin string `toml:"webadmin"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -49,9 +49,9 @@ type Subscribers struct { type Subscriber struct { XMLName xml.Name `xml:"subscriber"` - ClientId string `xml:"clientId,attr"` + ClientID string `xml:"clientId,attr"` SubscriptionName string 
`xml:"subscriptionName,attr"` - ConnectionId string `xml:"connectionId,attr"` + ConnectionID string `xml:"connectionId,attr"` DestinationName string `xml:"destinationName,attr"` Selector string `xml:"selector,attr"` Active string `xml:"active,attr"` @@ -117,7 +117,7 @@ func (a *ActiveMQ) SampleConfig() string { return sampleConfig } -func (a *ActiveMQ) createHttpClient() (*http.Client, error) { +func (a *ActiveMQ) createHTTPClient() (*http.Client, error) { tlsCfg, err := a.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -127,15 +127,15 @@ func (a *ActiveMQ) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: a.ResponseTimeout.Duration, + Timeout: time.Duration(a.ResponseTimeout), } return client, nil } func (a *ActiveMQ) Init() error { - if a.ResponseTimeout.Duration < time.Second { - a.ResponseTimeout.Duration = time.Second * 5 + if a.ResponseTimeout < config.Duration(time.Second) { + a.ResponseTimeout = config.Duration(time.Second * 5) } var err error @@ -157,7 +157,7 @@ func (a *ActiveMQ) Init() error { a.baseURL = u - a.client, err = a.createHttpClient() + a.client, err = a.createHTTPClient() if err != nil { return err } @@ -184,7 +184,7 @@ func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { @@ -228,9 +228,9 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber records := make(map[string]interface{}) tags := make(map[string]string) - tags["client_id"] = subscriber.ClientId + tags["client_id"] = subscriber.ClientID tags["subscription_name"] = subscriber.SubscriptionName - tags["connection_id"] = subscriber.ConnectionId + tags["connection_id"] = subscriber.ConnectionID tags["destination_name"] = subscriber.DestinationName tags["selector"] = subscriber.Selector tags["active"] = subscriber.Active diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go index 407a381775adc..1e733a4eed201 100644 --- a/plugins/inputs/activemq/activemq_test.go +++ b/plugins/inputs/activemq/activemq_test.go @@ -11,7 +11,6 @@ import ( ) func TestGatherQueuesMetrics(t *testing.T) { - s := ` @@ -31,7 +30,7 @@ func TestGatherQueuesMetrics(t *testing.T) { queues := Queues{} - xml.Unmarshal([]byte(s), &queues) + require.NoError(t, xml.Unmarshal([]byte(s), &queues)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -50,14 +49,13 @@ func TestGatherQueuesMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherQueuesMetrics(&acc, queues) acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags) } func TestGatherTopicsMetrics(t *testing.T) { - s := ` @@ -78,7 +76,7 @@ func TestGatherTopicsMetrics(t *testing.T) { topics := Topics{} - xml.Unmarshal([]byte(s), &topics) + require.NoError(t, xml.Unmarshal([]byte(s), &topics)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -97,14 +95,13 @@ func TestGatherTopicsMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherTopicsMetrics(&acc, topics) acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags) } func 
TestGatherSubscribersMetrics(t *testing.T) { - s := ` @@ -113,7 +110,7 @@ func TestGatherSubscribersMetrics(t *testing.T) { subscribers := Subscribers{} - xml.Unmarshal([]byte(s), &subscribers) + require.NoError(t, xml.Unmarshal([]byte(s), &subscribers)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -138,7 +135,7 @@ func TestGatherSubscribersMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherSubscribersMetrics(&acc, subscribers) acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags) @@ -152,13 +149,16 @@ func TestURLs(t *testing.T) { switch r.URL.Path { case "/admin/xml/queues.jsp": w.WriteHeader(http.StatusOK) - w.Write([]byte("")) + _, err := w.Write([]byte("")) + require.NoError(t, err) case "/admin/xml/topics.jsp": w.WriteHeader(http.StatusOK) - w.Write([]byte("")) + _, err := w.Write([]byte("")) + require.NoError(t, err) case "/admin/xml/subscribers.jsp": w.WriteHeader(http.StatusOK) - w.Write([]byte("")) + _, err := w.Write([]byte("")) + require.NoError(t, err) default: w.WriteHeader(http.StatusNotFound) t.Fatalf("unexpected path: " + r.URL.Path) diff --git a/plugins/inputs/aerospike/README.md b/plugins/inputs/aerospike/README.md index 66fbbe12ec8f0..59ff6ed702db7 100644 --- a/plugins/inputs/aerospike/README.md +++ b/plugins/inputs/aerospike/README.md @@ -28,18 +28,17 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # tls_key = "/etc/telegraf/key.pem" ## If false, skip chain & host verification # insecure_skip_verify = true - + # Feature Options # Add namespace variable to limit the namespaces executed on # Leave blank to do all # disable_query_namespaces = true # default false # namespaces = ["namespace1", "namespace2"] - # Enable set level telmetry + # Enable set level telemetry # query_sets = true # default: false # Add namespace set combinations to limit sets executed on - # Leave blank to do all - # sets = ["namespace1/set1", "namespace1/set2"] + # Leave blank to do all sets # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] # Histograms @@ -48,12 +47,10 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # by default, aerospike produces a 100 bucket histogram # this is not great for most graphing tools, this will allow - # the ability to squash this to a smaller number of buckets + # the ability to squash this to a smaller number of buckets # To have a balanced histogram, the number of buckets chosen # should divide evenly into 100. 
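+ # e.g. with num_histogram_buckets = 10, each reported bucket is the sum of
+ # ceil(100/10) = 10 adjacent source buckets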
# num_histogram_buckets = 100 # default: 10 - - ``` ### Measurements: diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 7ab15d18168f7..dd2ff32df975f 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -10,11 +10,11 @@ import ( "sync" "time" + as "github.com/aerospike/aerospike-client-go" + "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - - as "github.com/aerospike/aerospike-client-go" ) type Aerospike struct { @@ -65,7 +65,7 @@ var sampleConfig = ` # disable_query_namespaces = true # default false # namespaces = ["namespace1", "namespace2"] - # Enable set level telmetry + # Enable set level telemetry # query_sets = true # default: false # Add namespace set combinations to limit sets executed on # Leave blank to do all sets @@ -77,10 +77,20 @@ var sampleConfig = ` # by default, aerospike produces a 100 bucket histogram # this is not great for most graphing tools, this will allow - # the ability to squash this to a smaller number of buckets + # the ability to squash this to a smaller number of buckets + # To have a balanced histogram, the number of buckets chosen + # should divide evenly into 100. # num_histogram_buckets = 100 # default: 10 ` +// On the random chance a hex value is all digits +// these are fields that can contain hex and should always be strings +var protectedHexFields = map[string]bool{ + "node_name": true, + "cluster_key": true, + "paxos_principal": true, +} + func (a *Aerospike) SampleConfig() string { return sampleConfig } @@ -111,7 +121,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { } if len(a.Servers) == 0 { - return a.gatherServer("127.0.0.1:3000", acc) + return a.gatherServer(acc, "127.0.0.1:3000") } var wg sync.WaitGroup @@ -119,7 +129,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { for _, server := range a.Servers { go func(serv string) { defer wg.Done() - acc.AddError(a.gatherServer(serv, acc)) + acc.AddError(a.gatherServer(acc, serv)) }(server) } @@ -127,7 +137,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { return nil } -func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) error { +func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) error { host, port, err := net.SplitHostPort(hostPort) if err != nil { return err @@ -154,7 +164,7 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if err != nil { return err } - a.parseNodeInfo(stats, hostPort, n.GetName(), acc) + a.parseNodeInfo(acc, stats, hostPort, n.GetName()) namespaces, err := a.getNamespaces(n) if err != nil { @@ -168,18 +178,17 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if err != nil { continue - } else { - a.parseNamespaceInfo(stats, hostPort, namespace, n.GetName(), acc) } + a.parseNamespaceInfo(acc, stats, hostPort, namespace, n.GetName()) if a.EnableTTLHistogram { - err = a.getTTLHistogram(hostPort, namespace, "", n, acc) + err = a.getTTLHistogram(acc, hostPort, namespace, "", n) if err != nil { continue } } if a.EnableObjectSizeLinearHistogram { - err = a.getObjectSizeLinearHistogram(hostPort, namespace, "", n, acc) + err = a.getObjectSizeLinearHistogram(acc, hostPort, namespace, "", n) if err != nil { continue } @@ -192,24 +201,22 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if err == nil { for _, 
namespaceSet := range namespaceSets { namespace, set := splitNamespaceSet(namespaceSet) - stats, err := a.getSetInfo(namespaceSet, n) if err != nil { continue - } else { - a.parseSetInfo(stats, hostPort, namespaceSet, n.GetName(), acc) } + a.parseSetInfo(acc, stats, hostPort, namespaceSet, n.GetName()) if a.EnableTTLHistogram { - err = a.getTTLHistogram(hostPort, namespace, set, n, acc) + err = a.getTTLHistogram(acc, hostPort, namespace, set, n) if err != nil { continue } } if a.EnableObjectSizeLinearHistogram { - err = a.getObjectSizeLinearHistogram(hostPort, namespace, set, n, acc) + err = a.getObjectSizeLinearHistogram(acc, hostPort, namespace, set, n) if err != nil { continue } @@ -230,7 +237,7 @@ func (a *Aerospike) getNodeInfo(n *as.Node) (map[string]string, error) { return stats, nil } -func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, nodeName string, acc telegraf.Accumulator) { +func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, nodeName string) { tags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -238,12 +245,10 @@ func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, node fields := make(map[string]interface{}) for k, v := range stats { - val := parseValue(v) - fields[strings.Replace(k, "-", "_", -1)] = val + key := strings.Replace(k, "-", "_", -1) + fields[key] = parseAerospikeValue(key, v) } acc.AddFields("aerospike_node", fields, tags, time.Now()) - - return } func (a *Aerospike) getNamespaces(n *as.Node) ([]string, error) { @@ -269,8 +274,7 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node) (map[string]s return stats, err } -func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, namespace string, nodeName string, acc telegraf.Accumulator) { - +func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespace string, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -284,12 +288,10 @@ func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, if len(parts) < 2 { continue } - val := parseValue(parts[1]) - nFields[strings.Replace(parts[0], "-", "_", -1)] = val + key := strings.Replace(parts[0], "-", "_", -1) + nFields[key] = parseAerospikeValue(key, parts[1]) } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) - - return } func (a *Aerospike) getSets(n *as.Node) ([]string, error) { @@ -338,8 +340,7 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node) (map[string]stri return stats, nil } -func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, namespaceSet string, nodeName string, acc telegraf.Accumulator) { - +func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespaceSet string, nodeName string) { stat := strings.Split( strings.TrimSuffix( stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":") @@ -355,31 +356,32 @@ func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, names continue } - val := parseValue(pieces[1]) - nFields[strings.Replace(pieces[0], "-", "_", -1)] = val + key := strings.Replace(pieces[0], "-", "_", -1) + nFields[key] = parseAerospikeValue(key, pieces[1]) } acc.AddFields("aerospike_set", nFields, nTags, time.Now()) - - return } -func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) 
error { +func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort string, namespace string, set string, n *as.Node) error { stats, err := a.getHistogram(namespace, set, "ttl", n) if err != nil { return err } - a.parseHistogram(stats, hostPort, namespace, set, "ttl", n.GetName(), acc) + + nTags := createTags(hostPort, n.GetName(), namespace, set) + a.parseHistogram(acc, stats, nTags, "ttl") return nil } -func (a *Aerospike) getObjectSizeLinearHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { - +func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort string, namespace string, set string, n *as.Node) error { stats, err := a.getHistogram(namespace, set, "object-size-linear", n) if err != nil { return err } - a.parseHistogram(stats, hostPort, namespace, set, "object-size-linear", n.GetName(), acc) + + nTags := createTags(hostPort, n.GetName(), namespace, set) + a.parseHistogram(acc, stats, nTags, "object-size-linear") return nil } @@ -397,21 +399,9 @@ func (a *Aerospike) getHistogram(namespace string, set string, histogramType str return nil, err } return stats, nil - } -func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, namespace string, set string, histogramType string, nodeName string, acc telegraf.Accumulator) { - - nTags := map[string]string{ - "aerospike_host": hostPort, - "node_name": nodeName, - "namespace": namespace, - } - - if len(set) > 0 { - nTags["set"] = set - } - +func (a *Aerospike) parseHistogram(acc telegraf.Accumulator, stats map[string]string, nTags map[string]string, histogramType string) { nFields := make(map[string]interface{}) for _, stat := range stats { @@ -424,10 +414,10 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam if pieces[0] == "buckets" { buckets := strings.Split(pieces[1], ",") - // Normalize incase of less buckets than expected + // Normalize in case of less buckets than expected numRecordsPerBucket := 1 if len(buckets) > a.NumberHistogramBuckets { - numRecordsPerBucket = int(math.Ceil((float64(len(buckets)) / float64(a.NumberHistogramBuckets)))) + numRecordsPerBucket = int(math.Ceil(float64(len(buckets)) / float64(a.NumberHistogramBuckets))) } bucketCount := 0 @@ -436,7 +426,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam for i, bucket := range buckets { // Sum records and increment bucket collection counter if bucketCount < numRecordsPerBucket { - bucketSum = bucketSum + parseValue(bucket).(int64) + bucketSum = bucketSum + parseAerospikeValue("", bucket).(int64) bucketCount++ } @@ -454,23 +444,22 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam nFields[strconv.Itoa(bucketName)] = bucketSum } } - } } } acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now()) - - return } -func splitNamespaceSet(namespaceSet string) (string, string) { +func splitNamespaceSet(namespaceSet string) (namespace string, set string) { split := strings.Split(namespaceSet, "/") return split[0], split[1] } -func parseValue(v string) interface{} { - if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { +func parseAerospikeValue(key string, v string) interface{} { + if protectedHexFields[key] { + return v + } else if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { return parsed } else if parsed, err := strconv.ParseUint(v, 10, 64); err == nil { return parsed @@ -482,12 +471,17 
@@ func parseValue(v string) interface{} { } } -func copyTags(m map[string]string) map[string]string { - out := make(map[string]string) - for k, v := range m { - out[k] = v +func createTags(hostPort string, nodeName string, namespace string, set string) map[string]string { + nTags := map[string]string{ + "aerospike_host": hostPort, + "node_name": nodeName, + "namespace": namespace, + } + + if len(set) > 0 { + nTags["set"] = set } - return out + return nTags } func init() { diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index ee69f0049f401..ab93d4e2a185f 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -4,12 +4,12 @@ import ( "testing" as "github.com/aerospike/aerospike-client-go" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) -func TestAerospikeStatistics(t *testing.T) { +func TestAerospikeStatisticsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -23,18 +23,17 @@ func TestAerospikeStatistics(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasTag("aerospike_node", "node_name")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasTag("aerospike_namespace", "node_name")) - assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasTag("aerospike_node", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasTag("aerospike_namespace", "node_name")) + require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) namespaceName := acc.TagValue("aerospike_namespace", "namespace") - assert.Equal(t, namespaceName, "test") - + require.Equal(t, "test", namespaceName) } -func TestAerospikeStatisticsPartialErr(t *testing.T) { +func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -51,14 +50,14 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) { require.Error(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) namespaceName := acc.TagSetValue("aerospike_namespace", "namespace") - assert.Equal(t, namespaceName, "test") + require.Equal(t, "test", namespaceName) } -func TestSelectNamepsaces(t *testing.T) { +func TestSelectNamespacesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -74,25 +73,25 @@ func TestSelectNamepsaces(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasTag("aerospike_node", "node_name")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasTag("aerospike_namespace", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasTag("aerospike_node", "node_name")) + require.True(t, 
acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasTag("aerospike_namespace", "node_name")) // Expect only 1 namespace count := 0 for _, p := range acc.Metrics { if p.Measurement == "aerospike_namespace" { - count += 1 + count++ } } - assert.Equal(t, count, 1) + require.Equal(t, 1, count) // expect namespace to have no fields as nonexistent - assert.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) + require.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) } -func TestDisableQueryNamespaces(t *testing.T) { +func TestDisableQueryNamespacesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -108,18 +107,18 @@ func TestDisableQueryNamespaces(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.False(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.False(t, acc.HasMeasurement("aerospike_namespace")) a.DisableQueryNamespaces = false err = acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) } -func TestQuerySets(t *testing.T) { +func TestQuerySetsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -128,6 +127,7 @@ func TestQuerySets(t *testing.T) { // test is the default namespace from aerospike policy := as.NewClientPolicy() client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000) + require.NoError(t, err) key, err := as.NewKey("test", "foo", 123) require.NoError(t, err) @@ -159,16 +159,15 @@ func TestQuerySets(t *testing.T) { err = acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) - assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) - - assert.True(t, acc.HasMeasurement("aerospike_set")) - assert.True(t, acc.HasTag("aerospike_set", "set")) - assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) + require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) + require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) + require.True(t, acc.HasMeasurement("aerospike_set")) + require.True(t, acc.HasTag("aerospike_set", "set")) + require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) } -func TestSelectQuerySets(t *testing.T) { +func TestSelectQuerySetsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -177,6 +176,7 @@ func TestSelectQuerySets(t *testing.T) { // test is the default namespace from aerospike policy := as.NewClientPolicy() client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000) + require.NoError(t, err) key, err := as.NewKey("test", "foo", 123) require.NoError(t, err) @@ -209,16 +209,15 @@ func TestSelectQuerySets(t *testing.T) { err = acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) - assert.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) - - assert.True(t, acc.HasMeasurement("aerospike_set")) - assert.True(t, acc.HasTag("aerospike_set", "set")) - assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) + 
require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) + require.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) + require.True(t, acc.HasMeasurement("aerospike_set")) + require.True(t, acc.HasTag("aerospike_set", "set")) + require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) } -func TestDisableTTLHistogram(t *testing.T) { +func TestDisableTTLHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -236,9 +235,9 @@ func TestDisableTTLHistogram(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.False(t, acc.HasMeasurement("aerospike_histogram_ttl")) + require.False(t, acc.HasMeasurement("aerospike_histogram_ttl")) } -func TestTTLHistogram(t *testing.T) { +func TestTTLHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } else { @@ -253,7 +252,7 @@ func TestTTLHistogram(t *testing.T) { } /* Produces histogram - Measurment exists + Measurement exists Has appropriate tags (node name etc) Has appropriate keys (time:value) may be able to leverage histogram plugin @@ -262,11 +261,10 @@ func TestTTLHistogram(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_histogram_ttl")) - assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test")) - + require.True(t, acc.HasMeasurement("aerospike_histogram_ttl")) + require.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test")) } -func TestDisableObjectSizeLinearHistogram(t *testing.T) { +func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -284,10 +282,9 @@ func TestDisableObjectSizeLinearHistogram(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) + require.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) } -func TestObjectSizeLinearHistogram(t *testing.T) { - +func TestObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } else { @@ -302,7 +299,7 @@ func TestObjectSizeLinearHistogram(t *testing.T) { } /* Produces histogram - Measurment exists + Measurement exists Has appropriate tags (node name etc) Has appropriate keys (time:value) @@ -310,8 +307,8 @@ func TestObjectSizeLinearHistogram(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) - assert.True(t, FindTagValue(&acc, "aerospike_histogram_object_size_linear", "namespace", "test")) + require.True(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) + require.True(t, FindTagValue(&acc, "aerospike_histogram_object_size_linear", "namespace", "test")) } func TestParseNodeInfo(t *testing.T) { @@ -335,7 +332,7 @@ func TestParseNodeInfo(t *testing.T) { "node_name": "TestNodeName", } - a.parseNodeInfo(stats, "127.0.0.1:3000", "TestNodeName", &acc) + a.parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags) } @@ -361,7 +358,7 @@ func TestParseNamespaceInfo(t *testing.T) { "namespace": "test", } - a.parseNamespaceInfo(stats, "127.0.0.1:3000", "test", "TestNodeName", &acc) + a.parseNamespaceInfo(&acc, stats, 
"127.0.0.1:3000", "test", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags) } @@ -385,7 +382,7 @@ func TestParseSetInfo(t *testing.T) { "node_name": "TestNodeName", "set": "test/foo", } - a.parseSetInfo(stats, "127.0.0.1:3000", "test/foo", "TestNodeName", &acc) + a.parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_set", expectedFields, expectedTags) } @@ -417,9 +414,9 @@ func TestParseHistogramSet(t *testing.T) { "set": "foo", } - a.parseHistogram(stats, "127.0.0.1:3000", "test", "foo", "object-size-linear", "TestNodeName", &acc) + nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "foo") + a.parseHistogram(&acc, stats, nTags, "object-size-linear") acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags) - } func TestParseHistogramNamespace(t *testing.T) { a := &Aerospike{ @@ -448,25 +445,33 @@ func TestParseHistogramNamespace(t *testing.T) { "namespace": "test", } - a.parseHistogram(stats, "127.0.0.1:3000", "test", "", "object-size-linear", "TestNodeName", &acc) + nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "") + a.parseHistogram(&acc, stats, nTags, "object-size-linear") acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags) - } func TestAerospikeParseValue(t *testing.T) { // uint64 with value bigger than int64 max - val := parseValue("18446744041841121751") + val := parseAerospikeValue("", "18446744041841121751") require.Equal(t, uint64(18446744041841121751), val) - val = parseValue("true") + val = parseAerospikeValue("", "true") require.Equal(t, true, val) // int values - val = parseValue("42") - require.Equal(t, val, int64(42), "must be parsed as int") + val = parseAerospikeValue("", "42") + require.Equal(t, int64(42), val, "must be parsed as an int64") // string values - val = parseValue("BB977942A2CA502") - require.Equal(t, val, `BB977942A2CA502`, "must be left as string") + val = parseAerospikeValue("", "BB977942A2CA502") + require.Equal(t, `BB977942A2CA502`, val, "must be left as a string") + + // all digit hex values, unprotected + val = parseAerospikeValue("", "1992929191") + require.Equal(t, int64(1992929191), val, "must be parsed as an int64") + + // all digit hex values, protected + val = parseAerospikeValue("node_name", "1992929191") + require.Equal(t, `1992929191`, val, "must be left as a string") } func FindTagValue(acc *testutil.Accumulator, measurement string, key string, value string) bool { @@ -476,7 +481,6 @@ func FindTagValue(acc *testutil.Accumulator, measurement string, key string, val if ok && v == value { return true } - } } return false diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md new file mode 100644 index 0000000000000..4e351ea6d8b37 --- /dev/null +++ b/plugins/inputs/aliyuncms/README.md @@ -0,0 +1,147 @@ +# Alibaba (Aliyun) CloudMonitor Service Statistics Input Plugin +Here and after we use `Aliyun` instead `Alibaba` as it is default naming across web console and docs. + +This plugin will pull Metric Statistics from Aliyun CMS. + +### Aliyun Authentication + +This plugin uses an [AccessKey](https://www.alibabacloud.com/help/doc-detail/53045.htm?spm=a2c63.p38356.b99.127.5cba21fdt5MJKr&parentId=28572) credential for Authentication with the Aliyun OpenAPI endpoint. +In the following order the plugin will attempt to authenticate. +1. 
Ram RoleARN credential if `access_key_id`, `access_key_secret`, `role_arn`, `role_session_name` are specified +2. AccessKey STS token credential if `access_key_id`, `access_key_secret`, `access_key_sts_token` are specified +3. AccessKey credential if `access_key_id`, `access_key_secret` are specified +4. Ecs Ram Role Credential if `role_name` is specified +5. RSA keypair credential if `private_key`, `public_key_id` are specified +6. Environment variables credential +7. Instance metadata credential + +### Configuration: + +```toml + ## Aliyun Credentials + ## Credentials are loaded in the following order + ## 1) Ram RoleArn credential + ## 2) AccessKey STS token credential + ## 3) AccessKey credential + ## 4) Ecs Ram Role credential + ## 5) RSA keypair credential + ## 6) Environment variables credential + ## 7) Instance metadata credential + + # access_key_id = "" + # access_key_secret = "" + # access_key_sts_token = "" + # role_arn = "" + # role_session_name = "" + # private_key = "" + # public_key_id = "" + # role_name = "" + + ## Specify the ali cloud region list to be queried for metrics and objects discovery + ## If not set, all supported regions (see below) would be covered; this can put a significant load on the API, so the recommendation here + ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm + ## Default supported regions are: + ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, + ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, + ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 + ## + ## From a discovery perspective it sets the scope for object discovery; the discovered info can be used to enrich + ## the metrics with object attributes/tags. Discovery is not supported for all projects (if not supported, then + ## it will be reported at start - for example for the 'acs_cdn' project: + ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) + ## Currently, discovery is supported for the following projects: + ## - acs_ecs_dashboard + ## - acs_rds_dashboard + ## - acs_slb_dashboard + ## - acs_vpc_eip + regions = ["cn-hongkong"] + + # The minimum period for AliyunCMS metrics is 1 minute (60s). However, not all + # metrics are made available at the 1 minute period. Some are collected at + # 3 minute, 5 minute, or larger intervals. + # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv + # Note that if a period is configured that is smaller than the minimum for a + # particular metric, that metric will not be returned by the Aliyun OpenAPI + # and will not be collected by Telegraf. 
+ # + ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) + period = "5m" + + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) + delay = "1m" + + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = "5m" + + ## Metric Statistic Project (required) + project = "acs_slb_dashboard" + + ## Maximum requests per second, default value is 200 + ratelimit = 200 + + ## How often the discovery API call is executed (default 1m) + #discovery_interval = "1m" + + ## Metrics to Pull (Required) + [[inputs.aliyuncms.metrics]] + ## Metrics names to be requested, + ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] + + ## Dimension filters for Metric (these are optional). + ## This allows getting an additional metric dimension. If a dimension is not specified it can be returned or + ## the data can be aggregated - it depends on the particular metric; you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled) + ## Values specified here would be added into the list of discovered objects. + ## You can specify either a single dimension: + #dimensions = '{"instanceId": "p-example"}' + + ## Or you can specify several dimensions at once: + #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + + ## Enrichment tags can be added from discovery (if supported) + ## Notation is <tag_name>:<query_path> + ## To figure out which fields are available, consult the Describe API per project. + ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO + #tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] + ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId. + + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without discovery + ## data will be emitted, otherwise dropped. 
This can be of help when debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope + #allow_dps_without_discovery = false +``` + +#### Requirements and Terminology + +Plugin Configuration utilizes [preset metric items references](https://www.alibabacloud.com/help/doc-detail/28619.htm?spm=a2c63.p38356.a3.2.389f233d0kPJn0) + +- `regions` must contain valid Aliyun [Region](https://www.alibabacloud.com/help/doc-detail/40654.htm) values +- `period` must be a valid duration value +- `project` must be a preset project value +- `names` must be preset metric names +- `dimensions` must be preset dimension values + +### Measurements & Fields: + +Each Aliyun CMS Project monitored records a measurement with fields for each available Metric Statistic. +Project and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) + +- aliyuncms_{project} + - {metric}_average (metric Average value) + - {metric}_minimum (metric Minimum value) + - {metric}_maximum (metric Maximum value) + - {metric}_value (metric Value value) + +### Example Output: + +``` +$ ./telegraf --config telegraf.conf --input-filter aliyuncms --test +> aliyuncms_acs_slb_dashboard,instanceId=p-example,regionId=cn-hangzhou,userId=1234567890 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875 +``` \ No newline at end of file 
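The `dimensions` value accepts either a single JSON object or a JSON array of objects. A minimal, self-contained sketch of that two-shape decode (hypothetical helper name; the plugin's own decoding lives in `Init` in aliyuncms.go below):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodeDimensions normalizes both supported shapes,
// {"instanceId": "p-example"} and
// [{"instanceId": "p-example"},{"instanceId": "q-example"}],
// into a slice of string maps.
func decodeDimensions(raw string) ([]map[string]string, error) {
	obj := map[string]string{}
	if err := json.Unmarshal([]byte(raw), &obj); err == nil {
		return []map[string]string{obj}, nil
	}
	var arr []map[string]string
	if err := json.Unmarshal([]byte(raw), &arr); err != nil {
		return nil, fmt.Errorf("dimensions is neither an object nor an array: %w", err)
	}
	return arr, nil
}

func main() {
	dims, err := decodeDimensions(`[{"instanceId": "p-example"},{"instanceId": "q-example"}]`)
	fmt.Println(dims, err)
}
```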
diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go new file mode 100644 index 0000000000000..1dc20d7187853 --- /dev/null +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -0,0 +1,602 @@ +package aliyuncms + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" + "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/limiter" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/jmespath/go-jmespath" + "github.com/pkg/errors" +) + +const ( + description = "Pull Metric Statistics from Aliyun CMS" + sampleConfig = ` + ## Aliyun Credentials + ## Credentials are loaded in the following order + ## 1) Ram RoleArn credential + ## 2) AccessKey STS token credential + ## 3) AccessKey credential + ## 4) Ecs Ram Role credential + ## 5) RSA keypair credential + ## 6) Environment variables credential + ## 7) Instance metadata credential + + # access_key_id = "" + # access_key_secret = "" + # access_key_sts_token = "" + # role_arn = "" + # role_session_name = "" + # private_key = "" + # public_key_id = "" + # role_name = "" + + ## Specify the ali cloud region list to be queried for metrics and objects discovery + ## If not set, all supported regions (see below) would be covered; this can put a significant load on the API, so the recommendation here + ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm + ## Default supported regions are: + ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, + ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, + ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 + ## + ## From a discovery perspective it sets the scope for object discovery; the discovered info can be used to enrich + ## the metrics with object attributes/tags. Discovery is not supported for all projects (if not supported, then + ## it will be reported at start - for example for the 'acs_cdn' project: + ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) + ## Currently, discovery is supported for the following projects: + ## - acs_ecs_dashboard + ## - acs_rds_dashboard + ## - acs_slb_dashboard + ## - acs_vpc_eip + regions = ["cn-hongkong"] + + # The minimum period for AliyunCMS metrics is 1 minute (60s). However, not all + # metrics are made available at the 1 minute period. Some are collected at + # 3 minute, 5 minute, or larger intervals. + # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv + # Note that if a period is configured that is smaller than the minimum for a + # particular metric, that metric will not be returned by the Aliyun OpenAPI + # and will not be collected by Telegraf. + # + ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) + period = "5m" + + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) + delay = "1m" + + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = "5m" + + ## Metric Statistic Project (required) + project = "acs_slb_dashboard" + + ## Maximum requests per second, default value is 200 + ratelimit = 200 + + ## How often the discovery API call is executed (default 1m) + #discovery_interval = "1m" + + ## Metrics to Pull (Required) + [[inputs.aliyuncms.metrics]] + ## Metrics names to be requested, + ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] + + ## Dimension filters for Metric (these are optional). + ## This allows getting an additional metric dimension. If a dimension is not specified it can be returned or + ## the data can be aggregated - it depends on the particular metric; you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled) + ## Values specified here would be added into the list of discovered objects. + ## You can specify either a single dimension: + #dimensions = '{"instanceId": "p-example"}' + + ## Or you can specify several dimensions at once: + #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + + ## Enrichment tags can be added from discovery (if supported) + ## Notation is <tag_name>:<query_path> + ## To figure out which fields are available, consult the Describe API per project. 
+ ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO + #tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] + ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId. + + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without discovery + ## data will be emitted, otherwise dropped. This can be of help when debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope + #allow_dps_without_discovery = false +` +) + +type ( + // AliyunCMS is aliyun cms config info. + AliyunCMS struct { + AccessKeyID string `toml:"access_key_id"` + AccessKeySecret string `toml:"access_key_secret"` + AccessKeyStsToken string `toml:"access_key_sts_token"` + RoleArn string `toml:"role_arn"` + RoleSessionName string `toml:"role_session_name"` + PrivateKey string `toml:"private_key"` + PublicKeyID string `toml:"public_key_id"` + RoleName string `toml:"role_name"` + + Regions []string `toml:"regions"` + DiscoveryInterval config.Duration `toml:"discovery_interval"` + Period config.Duration `toml:"period"` + Delay config.Duration `toml:"delay"` + Project string `toml:"project"` + Metrics []*Metric `toml:"metrics"` + RateLimit int `toml:"ratelimit"` + + Log telegraf.Logger `toml:"-"` + + client aliyuncmsClient + windowStart time.Time + windowEnd time.Time + dt *discoveryTool + dimensionKey string + discoveryData map[string]interface{} + measurement string + } + + // Metric describes what metrics to get + Metric struct { + ObjectsFilter string `toml:"objects_filter"` + MetricNames []string `toml:"names"` + Dimensions string `toml:"dimensions"` //String representation of JSON dimensions + TagsQueryPath []string `toml:"tag_query_path"` + AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` //Allow data points without discovery data (if no discovery data found) + + dtLock sync.Mutex //Guard for discoveryTags & dimensions + discoveryTags map[string]map[string]string //Internal data structure that can enrich metrics with tags + dimensionsUdObj map[string]string + dimensionsUdArr []map[string]string //Parsed Dimensions JSON string (unmarshalled) + requestDimensions []map[string]string //this is the actual dimensions list that would be used in the API request + requestDimensionsStr string //String representation of the above + + } + + // Dimension describes how to get metrics + Dimension struct { + Value string `toml:"value"` + } + + aliyuncmsClient interface { + DescribeMetricList(request *cms.DescribeMetricListRequest) (response *cms.DescribeMetricListResponse, err error) + } +) + +// https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB +var aliyunRegionList = []string{ + "cn-qingdao", + "cn-beijing", + "cn-zhangjiakou", + "cn-huhehaote", + "cn-hangzhou", + "cn-shanghai", + "cn-shenzhen", + "cn-heyuan", + "cn-chengdu", + "cn-hongkong", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-5", + "ap-south-1", + "ap-northeast-1", + "us-west-1", + "us-east-1", + "eu-central-1", + "eu-west-1", + "me-east-1", +} + +// SampleConfig implements telegraf.Inputs interface +func (s *AliyunCMS) SampleConfig() string { + return sampleConfig +} + +// Description implements telegraf.Inputs interface +func (s 
+ return description
+}
+
+// Init performs checks of the plugin inputs and initializes internals
+func (s *AliyunCMS) Init() error {
+ if s.Project == "" {
+ return errors.New("project is not set")
+ }
+
+ var (
+ roleSessionExpiration = 600
+ sessionExpiration = 600
+ )
+ configuration := &providers.Configuration{
+ AccessKeyID: s.AccessKeyID,
+ AccessKeySecret: s.AccessKeySecret,
+ AccessKeyStsToken: s.AccessKeyStsToken,
+ RoleArn: s.RoleArn,
+ RoleSessionName: s.RoleSessionName,
+ RoleSessionExpiration: &roleSessionExpiration,
+ PrivateKey: s.PrivateKey,
+ PublicKeyID: s.PublicKeyID,
+ SessionExpiration: &sessionExpiration,
+ RoleName: s.RoleName,
+ }
+ credentialProviders := []providers.Provider{
+ providers.NewConfigurationCredentialProvider(configuration),
+ providers.NewEnvCredentialProvider(),
+ providers.NewInstanceMetadataProvider(),
+ }
+ credential, err := providers.NewChainProvider(credentialProviders).Retrieve()
+ if err != nil {
+ return errors.Errorf("failed to retrieve credential: %v", err)
+ }
+ s.client, err = cms.NewClientWithOptions("", sdk.NewConfig(), credential)
+ if err != nil {
+ return errors.Errorf("failed to create cms client: %v", err)
+ }
+
+ //Check metrics dimensions consistency: a dimensions filter can be either
+ //a single JSON object or a JSON array of objects
+ for _, metric := range s.Metrics {
+ if metric.Dimensions != "" {
+ metric.dimensionsUdObj = map[string]string{}
+ metric.dimensionsUdArr = []map[string]string{}
+ if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj); err != nil {
+ if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr); err != nil {
+ return errors.Errorf("can't parse dimensions (neither object nor array) %q: %v", metric.Dimensions, err)
+ }
+ }
+ }
+ }
+
+ s.measurement = formatMeasurement(s.Project)
+
+ //Check regions
+ if len(s.Regions) == 0 {
+ s.Regions = aliyunRegionList
+ s.Log.Infof("'regions' is not set. Metrics will be queried across %d regions:\n%s",
+ len(s.Regions), strings.Join(s.Regions, ","))
+ }
+
+ //Init discovery...
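+ //Note: the discovery tool below reuses the credential resolved above and is
+ //throttled to ~20% of the configured 'ratelimit'. If it can't be activated,
+ //the plugin still works; metrics are simply not enriched with discovery tags.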
+ if s.dt == nil { //Support for tests + s.dt, err = newDiscoveryTool(s.Regions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval)) + if err != nil { + s.Log.Errorf("Discovery tool is not activated: %v", err) + s.dt = nil + return nil + } + } + + s.discoveryData, err = s.dt.getDiscoveryDataAcrossRegions(nil) + if err != nil { + s.Log.Errorf("Discovery tool is not activated: %v", err) + s.dt = nil + return nil + } + + s.Log.Infof("%d object(s) discovered...", len(s.discoveryData)) + + //Special setting for acs_oss project since the API differs + if s.Project == "acs_oss" { + s.dimensionKey = "BucketName" + } + + return nil +} + +// Start plugin discovery loop, metrics are gathered through Gather +func (s *AliyunCMS) Start(telegraf.Accumulator) error { + //Start periodic discovery process + if s.dt != nil { + s.dt.start() + } + + return nil +} + +// Gather implements telegraf.Inputs interface +func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error { + s.updateWindow(time.Now()) + + // limit concurrency or we can easily exhaust user connection limit + lmtr := limiter.NewRateLimiter(s.RateLimit, time.Second) + defer lmtr.Stop() + + var wg sync.WaitGroup + for _, metric := range s.Metrics { + //Prepare internal structure with data from discovery + s.prepareTagsAndDimensions(metric) + wg.Add(len(metric.MetricNames)) + for _, metricName := range metric.MetricNames { + <-lmtr.C + go func(metricName string, metric *Metric) { + defer wg.Done() + acc.AddError(s.gatherMetric(acc, metricName, metric)) + }(metricName, metric) + } + wg.Wait() + } + + return nil +} + +// Stop - stops the plugin discovery loop +func (s *AliyunCMS) Stop() { + if s.dt != nil { + s.dt.stop() + } +} + +func (s *AliyunCMS) updateWindow(relativeTo time.Time) { + //https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR + //The start and end times are executed in the mode of + //opening left and closing right, and startTime cannot be equal + //to or greater than endTime. 
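+ //Example (with delay=1m, period=5m): a run at 12:06:00 produces the window
+ //12:00:00..12:05:00 on the first call; subsequent calls start where the
+ //previous window ended, so windows neither overlap nor leave gaps.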
+ + windowEnd := relativeTo.Add(-time.Duration(s.Delay)) + + if s.windowEnd.IsZero() { + // this is the first run, no window info, so just get a single period + s.windowStart = windowEnd.Add(-time.Duration(s.Period)) + } else { + // subsequent window, start where last window left off + s.windowStart = s.windowEnd + } + + s.windowEnd = windowEnd +} + +// Gather given metric and emit error +func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error { + for _, region := range s.Regions { + req := cms.CreateDescribeMetricListRequest() + req.Period = strconv.FormatInt(int64(time.Duration(s.Period).Seconds()), 10) + req.MetricName = metricName + req.Length = "10000" + req.Namespace = s.Project + req.EndTime = strconv.FormatInt(s.windowEnd.Unix()*1000, 10) + req.StartTime = strconv.FormatInt(s.windowStart.Unix()*1000, 10) + req.Dimensions = metric.requestDimensionsStr + req.RegionId = region + + for more := true; more; { + resp, err := s.client.DescribeMetricList(req) + if err != nil { + return errors.Errorf("failed to query metricName list: %v", err) + } + if resp.Code != "200" { + s.Log.Errorf("failed to query metricName list: %v", resp.Message) + break + } + + var datapoints []map[string]interface{} + if err := json.Unmarshal([]byte(resp.Datapoints), &datapoints); err != nil { + return errors.Errorf("failed to decode response datapoints: %v", err) + } + + if len(datapoints) == 0 { + s.Log.Debugf("No metrics returned from CMS, response msg: %s", resp.Message) + break + } + + NextDataPoint: + for _, datapoint := range datapoints { + fields := map[string]interface{}{} + datapointTime := int64(0) + tags := map[string]string{} + for key, value := range datapoint { + switch key { + case "instanceId", "BucketName": + tags[key] = value.(string) + if metric.discoveryTags != nil { //discovery can be not activated + //Skipping data point if discovery data not exist + _, ok := metric.discoveryTags[value.(string)] + if !ok && + !metric.AllowDataPointWODiscoveryData { + s.Log.Warnf("Instance %q is not found in discovery, skipping monitoring datapoint...", value.(string)) + continue NextDataPoint + } + + for k, v := range metric.discoveryTags[value.(string)] { + tags[k] = v + } + } + case "userId": + tags[key] = value.(string) + case "timestamp": + datapointTime = int64(value.(float64)) / 1000 + default: + fields[formatField(metricName, key)] = value + } + } + //Log.logW("Datapoint time: %s, now: %s", time.Unix(datapointTime, 0).Format(time.RFC3339), time.Now().Format(time.RFC3339)) + acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0)) + } + + req.NextToken = resp.NextToken + more = req.NextToken != "" + } + } + return nil +} + +//tag helper +func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string, err error) { + var ( + ok bool + queryPath = tagSpec + ) + tagKey = tagSpec + + //Split query path to tagKey and query path + if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 { + tagKey = splitted[0] + queryPath = splitted[1] + } + + tagRawValue, err := jmespath.Search(queryPath, data) + if err != nil { + return "", "", errors.Errorf("Can't query data from discovery data using query path %q: %v", + queryPath, err) + } + + if tagRawValue == nil { //Nothing found + return "", "", nil + } + + tagValue, ok = tagRawValue.(string) + if !ok { + return "", "", errors.Errorf("Tag value %v parsed by query %q is not a string value", + tagRawValue, queryPath) + } + + return tagKey, tagValue, nil +} + +func (s *AliyunCMS) 
prepareTagsAndDimensions(metric *Metric) {
+ var (
+ newData bool
+ defaultTags = []string{"RegionId:RegionId"}
+ )
+
+ if s.dt == nil { //Discovery is not activated
+ return
+ }
+
+ //Reading all data from the buffered channel
+L:
+ for {
+ select {
+ case s.discoveryData = <-s.dt.dataChan:
+ newData = true
+ continue
+ default:
+ break L
+ }
+ }
+
+ //New data has arrived (so process it), or this is the first call
+ if newData || len(metric.discoveryTags) == 0 {
+ metric.dtLock.Lock()
+ defer metric.dtLock.Unlock()
+
+ if metric.discoveryTags == nil {
+ metric.discoveryTags = make(map[string]map[string]string, len(s.discoveryData))
+ }
+
+ metric.requestDimensions = nil //erasing
+ metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData))
+
+ //Preparing tags & dims...
+ for instanceID, elem := range s.discoveryData {
+ //Start filling tags
+ //Remove the old value if it exists
+ delete(metric.discoveryTags, instanceID)
+ metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))
+
+ for _, tagQueryPath := range metric.TagsQueryPath {
+ tagKey, tagValue, err := parseTag(tagQueryPath, elem)
+ if err != nil {
+ s.Log.Errorf("%v", err)
+ continue
+ }
+ if tagValue == "" { //Nothing found
+ s.Log.Debugf("Data by query path %q is not found for instance %q", tagQueryPath, instanceID)
+ continue
+ }
+
+ metric.discoveryTags[instanceID][tagKey] = tagValue
+ }
+
+ //Adding default tags if not already there
+ for _, defaultTagQP := range defaultTags {
+ tagKey, tagValue, err := parseTag(defaultTagQP, elem)
+
+ if err != nil {
+ s.Log.Errorf("%v", err)
+ continue
+ }
+
+ if tagValue == "" { //Nothing found
+ s.Log.Debugf("Data by query path %q is not found for instance %q",
+ defaultTagQP, instanceID)
+ continue
+ }
+
+ metric.discoveryTags[instanceID][tagKey] = tagValue
+ }
+
+ //Preparing dimensions (first adding the dimensions that come from discovery data)
+ metric.requestDimensions = append(
+ metric.requestDimensions,
+ map[string]string{s.dimensionKey: instanceID})
+ }
+
+ //Get the final dimensions: the full list is what was provided
+ //in the config plus what comes from discovery
+ if len(metric.dimensionsUdArr) != 0 {
+ metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdArr...)
+ }
+ if len(metric.dimensionsUdObj) != 0 {
+ metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdObj)
+ }
+
+ //Marshalling the dimensions back to a JSON string for the API request
+ reqDim, err := json.Marshal(metric.requestDimensions)
+ if err != nil {
+ s.Log.Errorf("Can't marshal metric request dimensions %v: %v",
+ metric.requestDimensions, err)
+ metric.requestDimensionsStr = ""
+ } else {
+ metric.requestDimensionsStr = string(reqDim)
+ }
+ }
+}
+
+// Formatting helpers
+func formatField(metricName string, statistic string) string {
+ if metricName == statistic {
+ statistic = "value"
+ }
+ return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic))
+}
+
+func formatMeasurement(project string) string {
+ project = strings.ReplaceAll(project, "/", "_")
+ project = snakeCase(project)
+ return fmt.Sprintf("aliyuncms_%s", project)
+}
+
+func snakeCase(s string) string {
+ s = internal.SnakeCase(s)
+ s = strings.ReplaceAll(s, "__", "_")
+ return s
+}
+
+func init() {
+ inputs.Add("aliyuncms", func() telegraf.Input {
+ return &AliyunCMS{
+ RateLimit: 200,
+ DiscoveryInterval: config.Duration(time.Minute),
+ dimensionKey: "instanceId",
+ }
+ })
+}
diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go
new file mode 100644
index 0000000000000..7e346a6ae9b8e
--- /dev/null
+++ b/plugins/inputs/aliyuncms/aliyuncms_test.go
@@ -0,0 +1,416 @@
+package aliyuncms
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ "github.com/aliyun/alibaba-cloud-sdk-go/services/cms"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+const inputTitle = "inputs.aliyuncms"
+
+type mockGatherAliyunCMSClient struct{}
+
+func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
+ resp := new(cms.DescribeMetricListResponse)
+
+ switch request.MetricName {
+ case "InstanceActiveConnection":
+ resp.Code = "200"
+ resp.Period = "60"
+ resp.Datapoints = `
+ [{
+ "timestamp": 1490152860000,
+ "Maximum": 200,
+ "userId": "1234567898765432",
+ "Minimum": 100,
+ "instanceId": "i-abcdefgh123456",
+ "Average": 150,
+ "Value": 300
+ }]`
+ case "ErrorCode":
+ resp.Code = "404"
+ resp.Message = "ErrorCode"
+ case "ErrorDatapoint":
+ resp.Code = "200"
+ resp.Period = "60"
+ resp.Datapoints = `
+ [{
+ "timestamp": 1490152860000,
+ "Maximum": 200,
+ "userId": "1234567898765432",
+ "Minimum": 100,
+ "instanceId": "i-abcdefgh123456",
+ "Average": 150,
+ }]`
+ case "EmptyDatapoint":
+ resp.Code = "200"
+ resp.Period = "60"
+ resp.Datapoints = `[]`
+ case "ErrorResp":
+ return nil, errors.New("error response")
+ }
+ return resp, nil
+}
+
+type mockAliyunSDKCli struct {
+ resp *responses.CommonResponse
+}
+
+func (m *mockAliyunSDKCli) ProcessCommonRequest(_ *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+ return m.resp, nil
+}
+
+func getDiscoveryTool(project string, discoverRegions []string) (*discoveryTool, error) {
+ var (
+ err error
+ credential auth.Credential
+ )
+
+ configuration := &providers.Configuration{
AccessKeyID: "dummyKey", + AccessKeySecret: "dummySecret", + } + credentialProviders := []providers.Provider{ + providers.NewConfigurationCredentialProvider(configuration), + providers.NewEnvCredentialProvider(), + providers.NewInstanceMetadataProvider(), + } + credential, err = providers.NewChainProvider(credentialProviders).Retrieve() + if err != nil { + return nil, errors.Errorf("failed to retrieve credential: %v", err) + } + + dt, err := newDiscoveryTool(discoverRegions, project, testutil.Logger{Name: inputTitle}, credential, 1, time.Minute*2) + + if err != nil { + return nil, errors.Errorf("Can't create discovery tool object: %v", err) + } + return dt, nil +} + +func getMockSdkCli(httpResp *http.Response) (mockAliyunSDKCli, error) { + resp := responses.NewCommonResponse() + if err := responses.Unmarshal(resp, httpResp, "JSON"); err != nil { + return mockAliyunSDKCli{}, errors.Errorf("Can't parse response: %v", err) + } + return mockAliyunSDKCli{resp: resp}, nil +} + +func TestPluginDefaults(t *testing.T) { + require.Equal(t, &AliyunCMS{RateLimit: 200, + DiscoveryInterval: config.Duration(time.Minute), + dimensionKey: "instanceId", + }, inputs.Inputs["aliyuncms"]()) +} + +func TestPluginInitialize(t *testing.T) { + var err error + + plugin := new(AliyunCMS) + plugin.Log = testutil.Logger{Name: inputTitle} + plugin.Regions = []string{"cn-shanghai"} + plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.Regions) + if err != nil { + t.Fatalf("Can't create discovery tool object: %v", err) + } + + httpResp := &http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewBufferString( + `{ + "LoadBalancers": + { + "LoadBalancer": [ + {"LoadBalancerId":"bla"} + ] + }, + "TotalCount": 1, + "PageSize": 1, + "PageNumber": 1 + }`)), + } + mockCli, err := getMockSdkCli(httpResp) + if err != nil { + t.Fatalf("Can't create mock sdk cli: %v", err) + } + plugin.dt.cli = map[string]aliyunSdkClient{plugin.Regions[0]: &mockCli} + + tests := []struct { + name string + project string + accessKeyID string + accessKeySecret string + expectedErrorString string + regions []string + discoveryRegions []string + }{ + { + name: "Empty project", + expectedErrorString: "project is not set", + regions: []string{"cn-shanghai"}, + }, + { + name: "Valid project", + project: "acs_slb_dashboard", + regions: []string{"cn-shanghai"}, + accessKeyID: "dummy", + accessKeySecret: "dummy", + }, + { + name: "'regions' is not set", + project: "acs_slb_dashboard", + accessKeyID: "dummy", + accessKeySecret: "dummy", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin.Project = tt.project + plugin.AccessKeyID = tt.accessKeyID + plugin.AccessKeySecret = tt.accessKeySecret + plugin.Regions = tt.regions + + if tt.expectedErrorString != "" { + require.EqualError(t, plugin.Init(), tt.expectedErrorString) + } else { + require.Equal(t, nil, plugin.Init()) + } + if len(tt.regions) == 0 { //Check if set to default + require.Equal(t, plugin.Regions, aliyunRegionList) + } + }) + } +} + +func TestUpdateWindow(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := config.Duration(duration) + + plugin := &AliyunCMS{ + Project: "acs_slb_dashboard", + Period: internalDuration, + Delay: internalDuration, + Log: testutil.Logger{Name: inputTitle}, + } + + now := time.Now() + + require.True(t, plugin.windowEnd.IsZero()) + require.True(t, plugin.windowStart.IsZero()) + + plugin.updateWindow(now) + + newStartTime := plugin.windowEnd + + // initial window just has a single period + 
+ require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
+ require.EqualValues(t, plugin.windowStart, now.Add(-time.Duration(plugin.Delay)).Add(-time.Duration(plugin.Period)))
+
+ now = time.Now()
+ plugin.updateWindow(now)
+
+ // subsequent window uses previous end time as start time
+ require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
+ require.EqualValues(t, plugin.windowStart, newStartTime)
+}
+
+func TestGatherMetric(t *testing.T) {
+ plugin := &AliyunCMS{
+ Project: "acs_slb_dashboard",
+ client: new(mockGatherAliyunCMSClient),
+ measurement: formatMeasurement("acs_slb_dashboard"),
+ Log: testutil.Logger{Name: inputTitle},
+ Regions: []string{"cn-shanghai"},
+ }
+
+ metric := &Metric{
+ MetricNames: []string{},
+ Dimensions: `"instanceId": "i-abcdefgh123456"`,
+ }
+
+ tests := []struct {
+ name string
+ metricName string
+ expectedErrorString string
+ }{
+ {
+ name: "Datapoint with corrupted JSON",
+ metricName: "ErrorDatapoint",
+ expectedErrorString: `failed to decode response datapoints: invalid character '}' looking for beginning of object key string`,
+ },
+ {
+ name: "General CMS response error",
+ metricName: "ErrorResp",
+ expectedErrorString: "failed to query metricName list: error response",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc telegraf.Accumulator
+ require.EqualError(t, plugin.gatherMetric(acc, tt.metricName, metric), tt.expectedErrorString)
+ })
+ }
+}
+
+func TestGather(t *testing.T) {
+ metric := &Metric{
+ MetricNames: []string{},
+ Dimensions: `{"instanceId": "i-abcdefgh123456"}`,
+ }
+ plugin := &AliyunCMS{
+ AccessKeyID: "my_access_key_id",
+ AccessKeySecret: "my_access_key_secret",
+ Project: "acs_slb_dashboard",
+ Metrics: []*Metric{metric},
+ RateLimit: 200,
+ measurement: formatMeasurement("acs_slb_dashboard"),
+ Regions: []string{"cn-shanghai"},
+ client: new(mockGatherAliyunCMSClient),
+ Log: testutil.Logger{Name: inputTitle},
+ }
+
+ //test table:
+ tests := []struct {
+ name string
+ hasMeasurement bool
+ metricNames []string
+ expected []telegraf.Metric
+ }{
+ {
+ name: "Empty data point",
+ metricNames: []string{"EmptyDatapoint"},
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "aliyuncms_acs_slb_dashboard",
+ nil,
+ nil,
+ time.Time{}),
+ },
+ },
+ {
+ name: "Data point with fields & tags",
+ hasMeasurement: true,
+ metricNames: []string{"InstanceActiveConnection"},
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "aliyuncms_acs_slb_dashboard",
+ map[string]string{
+ "instanceId": "i-abcdefgh123456",
+ "userId": "1234567898765432",
+ },
+ map[string]interface{}{
+ "instance_active_connection_minimum": float64(100),
+ "instance_active_connection_maximum": float64(200),
+ "instance_active_connection_average": float64(150),
+ "instance_active_connection_value": float64(300),
+ },
+ time.Unix(1490152860, 0)),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc testutil.Accumulator
+ plugin.Metrics[0].MetricNames = tt.metricNames
+ require.Empty(t, acc.GatherError(plugin.Gather))
+ require.Equal(t, tt.hasMeasurement, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"))
+ if tt.hasMeasurement {
+ acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags())
+ }
+ })
+ }
+}
+
+func TestGetDiscoveryDataAcrossRegions(t *testing.T) {
+ //test table:
+ tests := []struct {
+ name string
+ project string
+ region string
+ httpResp *http.Response
+ discData map[string]interface{}
+ totalCount int
+ pageSize int
+ pageNumber int
+ expectedErrorString string
+ }{
+ {
+ name: "No root key in discovery response",
+ project: "acs_slb_dashboard",
+ region: "cn-hongkong",
+ httpResp: &http.Response{
+ StatusCode: 200,
+ Body: io.NopCloser(bytes.NewBufferString(`{}`)),
+ },
+ totalCount: 0,
+ pageSize: 0,
+ pageNumber: 0,
+ expectedErrorString: `Didn't find root key "LoadBalancers" in discovery response`,
+ },
+ {
+ name: "1 object discovered",
+ project: "acs_slb_dashboard",
+ region: "cn-hongkong",
+ httpResp: &http.Response{
+ StatusCode: 200,
+ Body: io.NopCloser(bytes.NewBufferString(
+ `{
+ "LoadBalancers":
+ {
+ "LoadBalancer": [
+ {"LoadBalancerId":"bla"}
+ ]
+ },
+ "TotalCount": 1,
+ "PageSize": 1,
+ "PageNumber": 1
+ }`)),
+ },
+ discData: map[string]interface{}{"bla": map[string]interface{}{"LoadBalancerId": "bla"}},
+ totalCount: 1,
+ pageSize: 1,
+ pageNumber: 1,
+ expectedErrorString: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dt, err := getDiscoveryTool(tt.project, []string{tt.region})
+ if err != nil {
+ t.Fatalf("Can't create discovery tool object: %v", err)
+ }
+
+ mockCli, err := getMockSdkCli(tt.httpResp)
+ if err != nil {
+ t.Fatalf("Can't create mock sdk cli: %v", err)
+ }
+ dt.cli = map[string]aliyunSdkClient{tt.region: &mockCli}
+ data, err := dt.getDiscoveryDataAcrossRegions(nil)
+
+ require.Equal(t, tt.discData, data)
+ if err != nil {
+ require.EqualError(t, err, tt.expectedErrorString)
+ }
+ })
+ }
+}
diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go
new file mode 100644
index 0000000000000..a6fe5471beecf
--- /dev/null
+++ b/plugins/inputs/aliyuncms/discovery.go
@@ -0,0 +1,491 @@
+package aliyuncms
+
+import (
+ "encoding/json"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
+ "github.com/aliyun/alibaba-cloud-sdk-go/services/rds"
+ "github.com/aliyun/alibaba-cloud-sdk-go/services/slb"
+ "github.com/aliyun/alibaba-cloud-sdk-go/services/vpc"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal/limiter"
+ "github.com/pkg/errors"
+)
+
+type discoveryRequest interface{}
+
+type aliyunSdkClient interface {
+ ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error)
+}
+
+// discoveryTool is an object that provides the discovery feature
+type discoveryTool struct {
+ req map[string]discoveryRequest //Discovery request (specific per object type)
+ rateLimit int //Rate limit for API queries, as it is limited by the API backend
+ reqDefaultPageSize int //Default page size while querying data from the API (how many objects per request)
+ cli map[string]aliyunSdkClient //API client, which performs the discovery request
+
+ respRootKey string //Root key in the JSON response under which to look for discovery data
+ respObjectIDKey string //Key in an element of the array under the root key that stores the object ID;
+ //for the majority of cases it would be InstanceId, for OSS it is BucketName.
+ //This key is also used in dimension filtering
+ wg sync.WaitGroup //WG for the primary discovery goroutine
+ interval time.Duration //Discovery interval
+ done chan bool //Done channel to stop the primary discovery goroutine
+ dataChan chan map[string]interface{} //Discovery data
+ lg telegraf.Logger //Telegraf logger (should be provided)
+}
+
+type parsedDResp struct {
+ data []interface{}
+ totalCount int
+ pageSize int
+ pageNumber int
+}
+
+//getRPCReqFromDiscoveryRequest is a utility function that extracts the embedded
+//*requests.RpcRequest from the various typed discovery requests
+func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
+ if reflect.ValueOf(req).Type().Kind() != reflect.Ptr ||
+ reflect.ValueOf(req).IsNil() {
+ return nil, errors.Errorf("Unexpected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())
+ }
+
+ ptrV := reflect.Indirect(reflect.ValueOf(req))
+
+ for i := 0; i < ptrV.NumField(); i++ {
+ if ptrV.Field(i).Type().String() == "*requests.RpcRequest" {
+ if !ptrV.Field(i).CanInterface() {
+ return nil, errors.Errorf("Can't get interface of %v", ptrV.Field(i))
+ }
+
+ rpcReq, ok := ptrV.Field(i).Interface().(*requests.RpcRequest)
+
+ if !ok {
+ return nil, errors.Errorf("Can't convert interface of %v to '*requests.RpcRequest' type", ptrV.Field(i).Interface())
+ }
+
+ return rpcReq, nil
+ }
+ }
+ return nil, errors.Errorf("Didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type())
+}
+
+//newDiscoveryTool returns a discovery tool object.
+//The object is used to periodically get data about aliyun objects and send this
+//data into a channel. The intention is to enrich reported metrics with discovery data.
+//Discovery is supported for a limited set of object types (defined by project) and can be extended in the future.
+//Discovery can be limited by region; if not set, all regions are queried.
+//Requests against the API can incur additional costs; consult the aliyun API documentation.
+func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, credential auth.Credential, rateLimit int, discoveryInterval time.Duration) (*discoveryTool, error) {
+ var (
+ dscReq = map[string]discoveryRequest{}
+ cli = map[string]aliyunSdkClient{}
+ parseRootKey = regexp.MustCompile(`Describe(.*)`)
+ responseRootKey string
+ responseObjectIDKey string
+ err error
+ noDiscoverySupportErr = errors.Errorf("no discovery support for project %q", project)
+ )
+
+ if len(regions) == 0 {
+ regions = aliyunRegionList
+ lg.Infof("'regions' is not provided! Discovery data will be queried across %d regions:\n%s",
+ len(aliyunRegionList), strings.Join(aliyunRegionList, ","))
+ }
+
+ if rateLimit == 0 { //Can be a rounding case
+ rateLimit = 1
+ }
+
+ for _, region := range regions {
+ switch project {
+ case "acs_ecs_dashboard":
+ dscReq[region] = ecs.CreateDescribeInstancesRequest()
+ responseObjectIDKey = "InstanceId"
+ case "acs_rds_dashboard":
+ dscReq[region] = rds.CreateDescribeDBInstancesRequest()
+ responseObjectIDKey = "DBInstanceId"
+ case "acs_slb_dashboard":
+ dscReq[region] = slb.CreateDescribeLoadBalancersRequest()
+ responseObjectIDKey = "LoadBalancerId"
+ case "acs_vpc_eip":
+ dscReq[region] = vpc.CreateDescribeEipAddressesRequest()
+ responseObjectIDKey = "AllocationId"
+ case "acs_oss":
+ //oss is really complicated: it has its own API format.
+ //As a possible solution we could mimic the request format supported by oss:
+ //req := DescribeLOSSRequest{
+ // RpcRequest: &requests.RpcRequest{},
+ //}
+ //req.InitWithApiInfo("oss", "2014-08-15", "DescribeDBInstances", "oss", "openAPI")
+ return nil, noDiscoverySupportErr
+ case "acs_cdn":
+ //API replies are in their own format.
+ return nil, noDiscoverySupportErr
+ case "acs_memcache", "acs_ocs", "acs_kvstore", "acs_mns_new", "acs_polardb",
+ "acs_gdb", "acs_ads", "acs_mongodb", "acs_express_connect", "acs_fc",
+ "acs_nat_gateway", "acs_sls_dashboard", "acs_containerservice_dashboard",
+ "acs_vpn", "acs_bandwidth_package", "acs_cen", "acs_ens", "acs_opensearch",
+ "acs_scdn", "acs_drds", "acs_iot", "acs_directmail", "acs_elasticsearch",
+ "acs_ess_dashboard", "acs_streamcompute", "acs_global_acceleration",
+ "acs_hitsdb", "acs_kafka", "acs_openad", "acs_pcdn", "acs_dcdn",
+ "acs_petadata", "acs_videolive", "acs_hybriddb", "acs_adb", "acs_mps",
+ "acs_maxcompute_prepay", "acs_hdfs", "acs_ddh", "acs_hbr", "acs_hdr",
+ "acs_cds":
+ //Discovery is not implemented for these projects (yet)
+ return nil, noDiscoverySupportErr
+ default:
+ return nil, errors.Errorf("project %q is not recognized by discovery", project)
+ }
+
+ cli[region], err = sdk.NewClientWithOptions(region, sdk.NewConfig(), credential)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(dscReq) == 0 || len(cli) == 0 {
+ return nil, errors.Errorf("Can't build discovery request for project: %q,\nregions: %v", project, regions)
+ }
+
+ //Getting the response root key (if not set already), to be able to parse discovery
+ //responses, as they differ per object type.
+ //Discovery requests are of the same type for every region, so pick the first one
+ rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq[regions[0]])
+ //This means that the discovery request is not of proper type/kind
+ if err != nil {
+ return nil, errors.Errorf("Can't parse rpc request object from discovery request %v", dscReq[regions[0]])
+ }
+
+ /*
+ The action name is of the following format: Describe<object type in plural>,
+ for example: DescribeLoadBalancers for the SLB project, or DescribeInstances for the ECS project.
+ We will use it to construct the root key name in the discovery API response.
+ It follows this logic: for the 'DescribeLoadBalancers' action in the discovery request we get a response
+ in JSON of the following structure:
+ {
+ ...
+ "LoadBalancers": {
+ "LoadBalancer": [ here come objects, one per instance ]
+ }
+ }
+ As we can see, the root key is a part of the action name, except the first word 'Describe'
+ */
+ result := parseRootKey.FindStringSubmatch(rpcReq.GetActionName())
+ if result == nil || len(result) != 2 {
+ return nil, errors.Errorf("Can't parse the discovery response root key from request action name %q", rpcReq.GetActionName())
+ }
+ responseRootKey = result[1]
+
+ return &discoveryTool{
+ req: dscReq,
+ cli: cli,
+ respRootKey: responseRootKey,
+ respObjectIDKey: responseObjectIDKey,
+ rateLimit: rateLimit,
+ interval: discoveryInterval,
+ reqDefaultPageSize: 20,
+ dataChan: make(chan map[string]interface{}, 1),
+ lg: lg,
+ }, nil
+}
+
+func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (*parsedDResp, error) {
+ var (
+ fullOutput = map[string]interface{}{}
+ data []byte
+ foundDataItem bool
+ foundRootKey bool
+ pdResp = &parsedDResp{}
+ )
+
+ data = resp.GetHttpContentBytes()
+ if data == nil { //No data
+ return nil, errors.Errorf("No data in response to be parsed")
+ }
+
+ if err := json.Unmarshal(data, &fullOutput); err != nil {
+ return nil, errors.Errorf("Can't parse JSON from discovery response: %v", err)
+ }
+
+ for key, val := range fullOutput {
+ switch key {
+ case dt.respRootKey:
+ foundRootKey = true
+ rootKeyVal, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, errors.Errorf("Content of root key %q is not an object: %v", key, val)
+ }
+
+ //It should contain the array with discovered data
+ for _, item := range rootKeyVal {
+ if pdResp.data, foundDataItem = item.([]interface{}); foundDataItem {
+ break
+ }
+ }
+ if !foundDataItem {
+ return nil, errors.Errorf("Didn't find array item in root key %q", key)
+ }
+ case "TotalCount":
+ pdResp.totalCount = int(val.(float64))
+ case "PageSize":
+ pdResp.pageSize = int(val.(float64))
+ case "PageNumber":
+ pdResp.pageNumber = int(val.(float64))
+ }
+ }
+ if !foundRootKey {
+ return nil, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey)
+ }
+
+ return pdResp, nil
+}
+
+func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, lmtr chan bool) (map[string]interface{}, error) {
+ var (
+ err error
+ resp *responses.CommonResponse
+ pDResp *parsedDResp
+ discoveryData []interface{}
+ totalCount int
+ pageNumber int
+ )
+ defer delete(req.QueryParams, "PageNumber")
+
+ for {
+ if lmtr != nil {
+ <-lmtr //Rate limiting
+ }
+
+ resp, err = cli.ProcessCommonRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ pDResp, err = dt.parseDiscoveryResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ discoveryData = append(discoveryData, pDResp.data...)
+ pageNumber = pDResp.pageNumber
+ totalCount = pDResp.totalCount
+
+ //Pagination
+ pageNumber++
+ req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)
+
+ if len(discoveryData) == totalCount { //All data received
+ //Map data to appropriate shape before return
+ preparedData := map[string]interface{}{}
+
+ for _, raw := range discoveryData {
+ elem, ok := raw.(map[string]interface{})
+ if !ok {
+ return nil, errors.Errorf("can't parse input data element, not a map[string]interface{} type")
+ }
+ if objectID, ok := elem[dt.respObjectIDKey].(string); ok {
+ preparedData[objectID] = elem
+ }
+ }
+ return preparedData, nil
+ }
+ }
+}
+
+func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[string]interface{}, error) {
+ var (
+ data map[string]interface{}
+ resultData = map[string]interface{}{}
+ )
+
+ for region, cli := range dt.cli {
+ //Building a common request, as the code below is the same no matter
+ //which aliyun object type (project) is used
+ dscReq, ok := dt.req[region]
+ if !ok {
+ return nil, errors.Errorf("Error building common discovery request: invalid region %q", region)
+ }
+
+ rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq)
+ if err != nil {
+ return nil, err
+ }
+
+ commonRequest := requests.NewCommonRequest()
+ commonRequest.Method = rpcReq.GetMethod()
+ commonRequest.Product = rpcReq.GetProduct()
+ commonRequest.Domain = rpcReq.GetDomain()
+ commonRequest.Version = rpcReq.GetVersion()
+ commonRequest.Scheme = rpcReq.GetScheme()
+ commonRequest.ApiName = rpcReq.GetActionName()
+ commonRequest.QueryParams = rpcReq.QueryParams
+ commonRequest.QueryParams["PageSize"] = strconv.Itoa(dt.reqDefaultPageSize)
+ commonRequest.TransToAcsRequest()
+
+ //Get discovery data using the common request
+ data, err = dt.getDiscoveryData(cli, commonRequest, lmtr)
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range data {
+ resultData[k] = v
+ }
+ }
+ return resultData, nil
+}
+
+// start launches the discovery polling loop; in case something new is found,
+// it is reported back through 'dataChan'
+func (dt *discoveryTool) start() {
+ var (
+ err error
+ data map[string]interface{}
+ lastData map[string]interface{}
+ )
+
+ //Initializing the done channel
+ dt.done = make(chan bool)
+
+ dt.wg.Add(1)
+ go func() {
+ defer dt.wg.Done()
+
+ ticker := time.NewTicker(dt.interval)
+ defer ticker.Stop()
+
+ lmtr := limiter.NewRateLimiter(dt.rateLimit, time.Second)
+ defer lmtr.Stop()
+
+ for {
+ select {
+ case <-dt.done:
+ return
+ case <-ticker.C:
+ data, err = dt.getDiscoveryDataAcrossRegions(lmtr.C)
+ if err != nil {
+ dt.lg.Errorf("Can't get discovery data: %v", err)
+ continue
+ }
+
+ if !reflect.DeepEqual(data, lastData) {
+ lastData = map[string]interface{}{}
+ for k, v := range data {
+ lastData[k] = v
+ }
+
+ //send discovery data in blocking mode
+ dt.dataChan <- data
+ }
+ }
+ }
+ }()
+}
+
+// stop the discovery loop, making sure
+// all data is read from 'dataChan'
+func (dt *discoveryTool) stop() {
+ close(dt.done)
+
+ //Shutdown timer
+ timer := time.NewTimer(time.Second * 3)
+ defer timer.Stop()
+L:
+ for { //Unblock the goroutine by reading from dt.dataChan
+ select {
+ case <-timer.C:
+ break L
+ case <-dt.dataChan:
+ }
+ }
+
+ dt.wg.Wait()
+}
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 1d1b8eb58b463..4b8d6dc3e1362 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -1,191 +1,208 @@
 package all
 
 import (
- _ "github.com/influxdata/telegraf/plugins/inputs/activemq"
- _ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
- _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
- _ "github.com/influxdata/telegraf/plugins/inputs/apache"
- _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd"
- _ "github.com/influxdata/telegraf/plugins/inputs/aurora"
- _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue"
- _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
- _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd"
- _ "github.com/influxdata/telegraf/plugins/inputs/bind"
- _ "github.com/influxdata/telegraf/plugins/inputs/bond"
- _ "github.com/influxdata/telegraf/plugins/inputs/burrow"
- _ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
- _ "github.com/influxdata/telegraf/plugins/inputs/ceph"
- _ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
- _ "github.com/influxdata/telegraf/plugins/inputs/chrony"
- _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt"
- _ "github.com/influxdata/telegraf/plugins/inputs/clickhouse"
- _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub"
- _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push"
- _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
- _ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
- _ "github.com/influxdata/telegraf/plugins/inputs/consul"
- _ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
- _ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
+ //Blank imports for plugins to register themselves
+ //_ "github.com/influxdata/telegraf/plugins/inputs/activemq"
+ //_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+ //_ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms"
+ //_ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi"
+ //_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
+ //_ "github.com/influxdata/telegraf/plugins/inputs/apache"
+ //_ "github.com/influxdata/telegraf/plugins/inputs/apcupsd"
+ //_ 
"github.com/influxdata/telegraf/plugins/inputs/aurora" + //_ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" + //_ "github.com/influxdata/telegraf/plugins/inputs/bcache" + //_ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + //_ "github.com/influxdata/telegraf/plugins/inputs/beat" + //_ "github.com/influxdata/telegraf/plugins/inputs/bind" + //_ "github.com/influxdata/telegraf/plugins/inputs/bond" + //_ "github.com/influxdata/telegraf/plugins/inputs/burrow" + //_ "github.com/influxdata/telegraf/plugins/inputs/cassandra" + //_ "github.com/influxdata/telegraf/plugins/inputs/ceph" + //_ "github.com/influxdata/telegraf/plugins/inputs/cgroup" + //_ "github.com/influxdata/telegraf/plugins/inputs/chrony" + //_ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" + //_ "github.com/influxdata/telegraf/plugins/inputs/clickhouse" + //_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" + //_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" + //_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" + //_ "github.com/influxdata/telegraf/plugins/inputs/conntrack" + //_ "github.com/influxdata/telegraf/plugins/inputs/consul" + //_ "github.com/influxdata/telegraf/plugins/inputs/couchbase" + //_ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/cpu" - _ "github.com/influxdata/telegraf/plugins/inputs/dcos" + //_ "github.com/influxdata/telegraf/plugins/inputs/csgo" + //_ "github.com/influxdata/telegraf/plugins/inputs/dcos" + //_ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor" _ "github.com/influxdata/telegraf/plugins/inputs/disk" _ "github.com/influxdata/telegraf/plugins/inputs/diskio" - _ "github.com/influxdata/telegraf/plugins/inputs/disque" - _ "github.com/influxdata/telegraf/plugins/inputs/dmcache" - _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" - _ "github.com/influxdata/telegraf/plugins/inputs/docker" - _ "github.com/influxdata/telegraf/plugins/inputs/docker_log" - _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" - _ "github.com/influxdata/telegraf/plugins/inputs/ecs" + //_ "github.com/influxdata/telegraf/plugins/inputs/disque" + //_ "github.com/influxdata/telegraf/plugins/inputs/dmcache" + //_ "github.com/influxdata/telegraf/plugins/inputs/dns_query" + //_ "github.com/influxdata/telegraf/plugins/inputs/docker" + //_ "github.com/influxdata/telegraf/plugins/inputs/docker_log" + //_ "github.com/influxdata/telegraf/plugins/inputs/dovecot" + //_ "github.com/influxdata/telegraf/plugins/inputs/dpdk" + //_ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" - _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" - _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/exec" - _ "github.com/influxdata/telegraf/plugins/inputs/execd" - _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" - _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" - _ "github.com/influxdata/telegraf/plugins/inputs/file" - _ "github.com/influxdata/telegraf/plugins/inputs/filecount" - _ "github.com/influxdata/telegraf/plugins/inputs/filestat" - _ "github.com/influxdata/telegraf/plugins/inputs/fireboard" - _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" - _ "github.com/influxdata/telegraf/plugins/inputs/github" - _ "github.com/influxdata/telegraf/plugins/inputs/gnmi" - _ 
"github.com/influxdata/telegraf/plugins/inputs/graylog" + //_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch_query" + //_ "github.com/influxdata/telegraf/plugins/inputs/ethtool" + //_ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/exec" + //_ "github.com/influxdata/telegraf/plugins/inputs/execd" + //_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" + //_ "github.com/influxdata/telegraf/plugins/inputs/fibaro" + //_ "github.com/influxdata/telegraf/plugins/inputs/file" + //_ "github.com/influxdata/telegraf/plugins/inputs/filecount" + //_ "github.com/influxdata/telegraf/plugins/inputs/filestat" + //_ "github.com/influxdata/telegraf/plugins/inputs/fireboard" + //_ "github.com/influxdata/telegraf/plugins/inputs/fluentd" + //_ "github.com/influxdata/telegraf/plugins/inputs/github" + //_ "github.com/influxdata/telegraf/plugins/inputs/gnmi" + //_ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" - _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" + //_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" _ "github.com/influxdata/telegraf/plugins/inputs/http" _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" - _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" - _ "github.com/influxdata/telegraf/plugins/inputs/infiniband" - _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" - _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" - _ "github.com/influxdata/telegraf/plugins/inputs/internal" - _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" - _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" - _ "github.com/influxdata/telegraf/plugins/inputs/ipset" - _ "github.com/influxdata/telegraf/plugins/inputs/iptables" - _ "github.com/influxdata/telegraf/plugins/inputs/ipvs" - _ "github.com/influxdata/telegraf/plugins/inputs/jenkins" - _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" - _ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" - _ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" - _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy" - _ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" + //_ "github.com/influxdata/telegraf/plugins/inputs/icinga2" + //_ "github.com/influxdata/telegraf/plugins/inputs/infiniband" + //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" + //_ "github.com/influxdata/telegraf/plugins/inputs/internal" + //_ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" + //_ "github.com/influxdata/telegraf/plugins/inputs/interrupts" + //_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" + //_ "github.com/influxdata/telegraf/plugins/inputs/ipset" + //_ "github.com/influxdata/telegraf/plugins/inputs/iptables" + //_ 
"github.com/influxdata/telegraf/plugins/inputs/ipvs" + //_ "github.com/influxdata/telegraf/plugins/inputs/jenkins" + //_ "github.com/influxdata/telegraf/plugins/inputs/jolokia" + //_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" + //_ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" + //_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy" + //_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" _ "github.com/influxdata/telegraf/plugins/inputs/kernel" - _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" - _ "github.com/influxdata/telegraf/plugins/inputs/kibana" - _ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" - _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" - _ "github.com/influxdata/telegraf/plugins/inputs/lanz" - _ "github.com/influxdata/telegraf/plugins/inputs/leofs" - _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" - _ "github.com/influxdata/telegraf/plugins/inputs/logparser" - _ "github.com/influxdata/telegraf/plugins/inputs/logstash" - _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" - _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" - _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" - _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + //_ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/kibana" + //_ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/knx_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" + //_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" + //_ "github.com/influxdata/telegraf/plugins/inputs/lanz" + //_ "github.com/influxdata/telegraf/plugins/inputs/leofs" + //_ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" + //_ "github.com/influxdata/telegraf/plugins/inputs/logparser" + //_ "github.com/influxdata/telegraf/plugins/inputs/logstash" + //_ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + //_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" + //_ "github.com/influxdata/telegraf/plugins/inputs/marklogic" + //_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + //_ "github.com/influxdata/telegraf/plugins/inputs/mdstat" _ "github.com/influxdata/telegraf/plugins/inputs/mem" - _ "github.com/influxdata/telegraf/plugins/inputs/memcached" - _ "github.com/influxdata/telegraf/plugins/inputs/mesos" - _ "github.com/influxdata/telegraf/plugins/inputs/minecraft" - _ "github.com/influxdata/telegraf/plugins/inputs/modbus" - _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" - _ "github.com/influxdata/telegraf/plugins/inputs/monit" - _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/multifile" - _ "github.com/influxdata/telegraf/plugins/inputs/mysql" - _ "github.com/influxdata/telegraf/plugins/inputs/nats" - _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" + //_ "github.com/influxdata/telegraf/plugins/inputs/memcached" + //_ "github.com/influxdata/telegraf/plugins/inputs/mesos" + //_ "github.com/influxdata/telegraf/plugins/inputs/minecraft" + //_ "github.com/influxdata/telegraf/plugins/inputs/modbus" + //_ 
"github.com/influxdata/telegraf/plugins/inputs/mongodb" + //_ "github.com/influxdata/telegraf/plugins/inputs/monit" + //_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/multifile" + //_ "github.com/influxdata/telegraf/plugins/inputs/mysql" + //_ "github.com/influxdata/telegraf/plugins/inputs/nats" + //_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" _ "github.com/influxdata/telegraf/plugins/inputs/net" - _ "github.com/influxdata/telegraf/plugins/inputs/net_response" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" - _ "github.com/influxdata/telegraf/plugins/inputs/nsd" - _ "github.com/influxdata/telegraf/plugins/inputs/nsq" - _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/nstat" - _ "github.com/influxdata/telegraf/plugins/inputs/ntpq" - _ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" - _ "github.com/influxdata/telegraf/plugins/inputs/opcua" - _ "github.com/influxdata/telegraf/plugins/inputs/openldap" - _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" - _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" - _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" - _ "github.com/influxdata/telegraf/plugins/inputs/passenger" - _ "github.com/influxdata/telegraf/plugins/inputs/pf" - _ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer" - _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" - _ "github.com/influxdata/telegraf/plugins/inputs/ping" - _ "github.com/influxdata/telegraf/plugins/inputs/postfix" - _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" - _ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" - _ "github.com/influxdata/telegraf/plugins/inputs/powerdns" - _ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor" - _ "github.com/influxdata/telegraf/plugins/inputs/processes" - _ "github.com/influxdata/telegraf/plugins/inputs/procstat" - _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" - _ "github.com/influxdata/telegraf/plugins/inputs/proxmox" - _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" - _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" - _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" - _ "github.com/influxdata/telegraf/plugins/inputs/ras" - _ "github.com/influxdata/telegraf/plugins/inputs/redfish" - _ "github.com/influxdata/telegraf/plugins/inputs/redis" - _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" - _ "github.com/influxdata/telegraf/plugins/inputs/riak" - _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" - _ "github.com/influxdata/telegraf/plugins/inputs/sensors" - _ "github.com/influxdata/telegraf/plugins/inputs/sflow" - _ "github.com/influxdata/telegraf/plugins/inputs/smart" - _ "github.com/influxdata/telegraf/plugins/inputs/snmp" - _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" - _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" + //_ "github.com/influxdata/telegraf/plugins/inputs/net_response" + //_ 
"github.com/influxdata/telegraf/plugins/inputs/nfsclient" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" + //_ "github.com/influxdata/telegraf/plugins/inputs/nsd" + //_ "github.com/influxdata/telegraf/plugins/inputs/nsq" + //_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/nstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/ntpq" + //_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" + //_ "github.com/influxdata/telegraf/plugins/inputs/opcua" + //_ "github.com/influxdata/telegraf/plugins/inputs/openldap" + //_ "github.com/influxdata/telegraf/plugins/inputs/openntpd" + //_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + //_ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" + //_ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" + //_ "github.com/influxdata/telegraf/plugins/inputs/passenger" + //_ "github.com/influxdata/telegraf/plugins/inputs/pf" + //_ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer" + //_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" + //_ "github.com/influxdata/telegraf/plugins/inputs/ping" + //_ "github.com/influxdata/telegraf/plugins/inputs/postfix" + //_ "github.com/influxdata/telegraf/plugins/inputs/postgresql" + //_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" + //_ "github.com/influxdata/telegraf/plugins/inputs/powerdns" + //_ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor" + //_ "github.com/influxdata/telegraf/plugins/inputs/processes" + //_ "github.com/influxdata/telegraf/plugins/inputs/procstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/prometheus" + //_ "github.com/influxdata/telegraf/plugins/inputs/proxmox" + //_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" + //_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" + //_ "github.com/influxdata/telegraf/plugins/inputs/raindrops" + //_ "github.com/influxdata/telegraf/plugins/inputs/ras" + //_ "github.com/influxdata/telegraf/plugins/inputs/ravendb" + //_ "github.com/influxdata/telegraf/plugins/inputs/redfish" + //_ "github.com/influxdata/telegraf/plugins/inputs/redis" + //_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" + //_ "github.com/influxdata/telegraf/plugins/inputs/riak" + //_ "github.com/influxdata/telegraf/plugins/inputs/riemann_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/salesforce" + //_ "github.com/influxdata/telegraf/plugins/inputs/sensors" + //_ "github.com/influxdata/telegraf/plugins/inputs/sflow" + //_ "github.com/influxdata/telegraf/plugins/inputs/smart" + //_ "github.com/influxdata/telegraf/plugins/inputs/snmp" + //_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" + //_ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" _ "github.com/influxdata/telegraf/plugins/inputs/solr" - _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" - _ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" - _ "github.com/influxdata/telegraf/plugins/inputs/statsd" - _ "github.com/influxdata/telegraf/plugins/inputs/suricata" - _ 
"github.com/influxdata/telegraf/plugins/inputs/swap" - _ "github.com/influxdata/telegraf/plugins/inputs/synproxy" + //_ "github.com/influxdata/telegraf/plugins/inputs/sql" + //_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" + //_ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" + //_ "github.com/influxdata/telegraf/plugins/inputs/statsd" + //_ "github.com/influxdata/telegraf/plugins/inputs/suricata" + //_ "github.com/influxdata/telegraf/plugins/inputs/swap" + //_ "github.com/influxdata/telegraf/plugins/inputs/synproxy" _ "github.com/influxdata/telegraf/plugins/inputs/syslog" - _ "github.com/influxdata/telegraf/plugins/inputs/sysstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/system" - _ "github.com/influxdata/telegraf/plugins/inputs/systemd_units" + //_ "github.com/influxdata/telegraf/plugins/inputs/systemd_units" _ "github.com/influxdata/telegraf/plugins/inputs/tail" - _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" - _ "github.com/influxdata/telegraf/plugins/inputs/temp" - _ "github.com/influxdata/telegraf/plugins/inputs/tengine" - _ "github.com/influxdata/telegraf/plugins/inputs/tomcat" - _ "github.com/influxdata/telegraf/plugins/inputs/trig" - _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" - _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/unbound" - _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" - _ "github.com/influxdata/telegraf/plugins/inputs/varnish" - _ "github.com/influxdata/telegraf/plugins/inputs/vsphere" - _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" - _ "github.com/influxdata/telegraf/plugins/inputs/win_eventlog" - _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" - _ "github.com/influxdata/telegraf/plugins/inputs/win_services" - _ "github.com/influxdata/telegraf/plugins/inputs/wireguard" - _ "github.com/influxdata/telegraf/plugins/inputs/wireless" - _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" - _ "github.com/influxdata/telegraf/plugins/inputs/zfs" - _ "github.com/influxdata/telegraf/plugins/inputs/zipkin" - _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" + //_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" + //_ "github.com/influxdata/telegraf/plugins/inputs/temp" + //_ "github.com/influxdata/telegraf/plugins/inputs/tengine" + //_ "github.com/influxdata/telegraf/plugins/inputs/tomcat" + //_ "github.com/influxdata/telegraf/plugins/inputs/trig" + //_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" + //_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/unbound" + //_ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" + //_ "github.com/influxdata/telegraf/plugins/inputs/varnish" + //_ "github.com/influxdata/telegraf/plugins/inputs/vsphere" + //_ "github.com/influxdata/telegraf/plugins/inputs/webhooks" + //_ "github.com/influxdata/telegraf/plugins/inputs/win_eventlog" + //_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" + //_ "github.com/influxdata/telegraf/plugins/inputs/win_services" + //_ "github.com/influxdata/telegraf/plugins/inputs/wireguard" + //_ "github.com/influxdata/telegraf/plugins/inputs/wireless" + //_ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" + //_ 
"github.com/influxdata/telegraf/plugins/inputs/zfs" + //_ "github.com/influxdata/telegraf/plugins/inputs/zipkin" + //_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" ) diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md new file mode 100644 index 0000000000000..ac080974dd274 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -0,0 +1,58 @@ +# AMD ROCm System Management Interface (SMI) Input Plugin + +This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. + +### Configuration + +```toml +# Pulls statistics from AMD GPUs attached to the host +[[inputs.amd_rocm_smi]] + ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/opt/rocm/bin/rocm-smi" + + ## Optional: timeout for GPU polling + # timeout = "5s" +``` + +### Metrics +- measurement: `amd_rocm_smi` + - tags + - `name` (entry name assigned by rocm-smi executable) + - `gpu_id` (id of the GPU according to rocm-smi) + - `gpu_unique_id` (unique id of the GPU) + + - fields + - `driver_version` (integer) + - `fan_speed`(integer) + - `memory_total`(integer B) + - `memory_used`(integer B) + - `memory_free`(integer B) + - `temperature_sensor_edge` (float, Celsius) + - `temperature_sensor_junction` (float, Celsius) + - `temperature_sensor_memory` (float, Celsius) + - `utilization_gpu` (integer, percentage) + - `utilization_memory` (integer, percentage) + - `clocks_current_sm` (integer, Mhz) + - `clocks_current_memory` (integer, Mhz) + - `power_draw` (float, Watt) + +### Troubleshooting +Check the full output by running `rocm-smi` binary manually. + +Linux: +```sh +rocm-smi rocm-smi -o -l -m -M -g -c -t -u -i -f -p -P -s -S -v --showreplaycount --showpids --showdriverversion --showmemvendor --showfwinfo --showproductname --showserial --showuniqueid --showbus --showpendingpages --showpagesinfo --showretiredpages --showunreservablepages --showmemuse --showvoltage --showtopo --showtopoweight --showtopohops --showtopotype --showtoponuma --showmeminfo all --json +``` +Please include the output of this command if opening a GitHub issue, together with ROCm version. 
+### Example Output
+```
+amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=28,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572551000000000
+amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=30,temperature_sensor_memory=91,utilization_gpu=0i 1630572701000000000
+amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572749000000000
+```
+### Limitations and notices
+Please note that this plugin has been developed and tested on a limited number of ROCm versions and a small set of GPUs. The latest ROCm version tested is currently 4.3.0.
+Depending on the device and driver versions, the amount of information provided by `rocm-smi` can vary, so some fields may start or stop appearing in the metrics after an update.
+The `rocm-smi` JSON output is not perfectly homogeneous and may change in the future, so parsing and unmarshaling can start failing after a ROCm update.
+
+Inspired by the current state of the `nvidia-smi` plugin.
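+
+As a minimal illustration of the tolerant-parsing approach described above (a sketch only,
+not the plugin's actual implementation; the `gpuSample` struct and its fields are
+hypothetical, trimmed-down stand-ins), every value can be decoded as a string and
+converted only when it is actually present:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+// gpuSample decodes numeric-looking rocm-smi values into strings, which
+// tolerates fields that appear, disappear, or change format across releases.
+type gpuSample struct {
+	FanSpeed string `json:"Fan speed (%)"`
+	Power    string `json:"Average Graphics Package Power (W)"`
+}
+
+func main() {
+	raw := []byte(`{"card0": {"Fan speed (%)": "14", "Average Graphics Package Power (W)": "15.0"}}`)
+
+	var cards map[string]gpuSample
+	if err := json.Unmarshal(raw, &cards); err != nil {
+		panic(err)
+	}
+
+	for name, card := range cards {
+		// Convert only the fields that were present in the JSON payload;
+		// missing fields are empty strings and fail conversion harmlessly.
+		if w, err := strconv.ParseFloat(card.Power, 64); err == nil {
+			fmt.Printf("%s power_draw=%v\n", name, w)
+		}
+		if f, err := strconv.Atoi(card.FanSpeed); err == nil {
+			fmt.Printf("%s fan_speed=%d\n", name, f)
+		}
+	}
+}
+```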
diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go new file mode 100644 index 0000000000000..7fdd32f466b73 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -0,0 +1,294 @@ +package amd_rocm_smi + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const measurement = "amd_rocm_smi" + +type ROCmSMI struct { + BinPath string + Timeout config.Duration +} + +// Description returns the description of the ROCmSMI plugin +func (rsmi *ROCmSMI) Description() string { + return "Query statistics from AMD Graphics cards using rocm-smi binary" +} + +var ROCmSMIConfig = ` +## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# bin_path = "/opt/rocm/bin/rocm-smi" + +## Optional: timeout for GPU polling +# timeout = "5s" +` + +// SampleConfig returns the sample configuration for the ROCmSMI plugin +func (rsmi *ROCmSMI) SampleConfig() string { + return ROCmSMIConfig +} + +// Gather implements the telegraf interface +func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { + if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) { + return fmt.Errorf("rocm-smi binary not found in path %s, cannot query GPUs statistics", rsmi.BinPath) + } + + data, err := rsmi.pollROCmSMI() + if err != nil { + return err + } + + err = gatherROCmSMI(data, acc) + if err != nil { + return err + } + + return nil +} + +func init() { + inputs.Add("amd_rocm_smi", func() telegraf.Input { + return &ROCmSMI{ + BinPath: "/opt/rocm/bin/rocm-smi", + Timeout: config.Duration(5 * time.Second), + } + }) +} + +func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) { + // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option + // that does not provide all the information, so each needed parameter is set manually + cmd := exec.Command(rsmi.BinPath, + "-o", + "-l", + "-m", + "-M", + "-g", + "-c", + "-t", + "-u", + "-i", + "-f", + "-p", + "-P", + "-s", + "-S", + "-v", + "--showreplaycount", + "--showpids", + "--showdriverversion", + "--showmemvendor", + "--showfwinfo", + "--showproductname", + "--showserial", + "--showuniqueid", + "--showbus", + "--showpendingpages", + "--showpagesinfo", + "--showmeminfo", + "all", + "--showretiredpages", + "--showunreservablepages", + "--showmemuse", + "--showvoltage", + "--showtopo", + "--showtopoweight", + "--showtopohops", + "--showtopotype", + "--showtoponuma", + "--json") + + ret, _ := internal.StdOutputTimeout(cmd, + time.Duration(rsmi.Timeout)) + return ret, nil +} + +func gatherROCmSMI(ret []byte, acc telegraf.Accumulator) error { + var gpus map[string]GPU + var sys map[string]sysInfo + + err1 := json.Unmarshal(ret, &gpus) + if err1 != nil { + return err1 + } + + err2 := json.Unmarshal(ret, &sys) + if err2 != nil { + return err2 + } + + metrics := genTagsFields(gpus, sys) + for _, metric := range metrics { + acc.AddFields(measurement, metric.fields, metric.tags) + } + + return nil +} + +type metric struct { + tags map[string]string + fields map[string]interface{} +} + +func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric { + metrics := []metric{} + for cardID, payload := range gpus { + if strings.Contains(cardID, "card") { + tags := map[string]string{ + "name": cardID, + } + fields := map[string]interface{}{} + + totVRAM, _ := 
strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64) + usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64) + strFree := strconv.FormatInt(totVRAM-usdVRAM, 10) + + setTagIfUsed(tags, "gpu_id", payload.GpuID) + setTagIfUsed(tags, "gpu_unique_id", payload.GpuUniqueID) + + setIfUsed("int", fields, "driver_version", strings.Replace(system["system"].DriverVersion, ".", "", -1)) + setIfUsed("int", fields, "fan_speed", payload.GpuFanSpeedPercentage) + setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory) + setIfUsed("int64", fields, "memory_used", payload.GpuVRAMTotalUsedMemory) + setIfUsed("int64", fields, "memory_free", strFree) + setIfUsed("float", fields, "temperature_sensor_edge", payload.GpuTemperatureSensorEdge) + setIfUsed("float", fields, "temperature_sensor_junction", payload.GpuTemperatureSensorJunction) + setIfUsed("float", fields, "temperature_sensor_memory", payload.GpuTemperatureSensorMemory) + setIfUsed("int", fields, "utilization_gpu", payload.GpuUsePercentage) + setIfUsed("int", fields, "utilization_memory", payload.GpuMemoryUsePercentage) + setIfUsed("int", fields, "clocks_current_sm", strings.Trim(payload.GpuSclkClockSpeed, "(Mhz)")) + setIfUsed("int", fields, "clocks_current_memory", strings.Trim(payload.GpuMclkClockSpeed, "(Mhz)")) + setIfUsed("float", fields, "power_draw", payload.GpuAveragePower) + + metrics = append(metrics, metric{tags, fields}) + } + } + return metrics +} + +func setTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func setIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return + } + + val := vals[0] + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + case "int64": + if val != "" { + i, err := strconv.ParseInt(val, 10, 64) + if err == nil { + m[k] = i + } + } + case "str": + if val != "" { + m[k] = val + } + } +} + +type sysInfo struct { + DriverVersion string `json:"Driver version"` +} + +type GPU struct { + GpuID string `json:"GPU ID"` + GpuUniqueID string `json:"Unique ID"` + GpuVBIOSVersion string `json:"VBIOS version"` + GpuTemperatureSensorEdge string `json:"Temperature (Sensor edge) (C)"` + GpuTemperatureSensorJunction string `json:"Temperature (Sensor junction) (C)"` + GpuTemperatureSensorMemory string `json:"Temperature (Sensor memory) (C)"` + GpuDcefClkClockSpeed string `json:"dcefclk clock speed"` + GpuDcefClkClockLevel string `json:"dcefclk clock level"` + GpuFclkClockSpeed string `json:"fclk clock speed"` + GpuFclkClockLevel string `json:"fclk clock level"` + GpuMclkClockSpeed string `json:"mclk clock speed:"` + GpuMclkClockLevel string `json:"mclk clock level:"` + GpuSclkClockSpeed string `json:"sclk clock speed:"` + GpuSclkClockLevel string `json:"sclk clock level:"` + GpuSocclkClockSpeed string `json:"socclk clock speed"` + GpuSocclkClockLevel string `json:"socclk clock level"` + GpuPcieClock string `json:"pcie clock level"` + GpuFanSpeedLevel string `json:"Fan speed (level)"` + GpuFanSpeedPercentage string `json:"Fan speed (%)"` + GpuFanRPM string `json:"Fan RPM"` + GpuPerformanceLevel string `json:"Performance Level"` + GpuOverdrive string `json:"GPU OverDrive value (%)"` + GpuMaxPower string `json:"Max Graphics Package Power (W)"` + GpuAveragePower string `json:"Average Graphics Package Power (W)"` + GpuUsePercentage string `json:"GPU use 
(%)"` + GpuMemoryUsePercentage string `json:"GPU memory use (%)"` + GpuMemoryVendor string `json:"GPU memory vendor"` + GpuPCIeReplay string `json:"PCIe Replay Count"` + GpuSerialNumber string `json:"Serial Number"` + GpuVoltagemV string `json:"Voltage (mV)"` + GpuPCIBus string `json:"PCI Bus"` + GpuASDDirmware string `json:"ASD firmware version"` + GpuCEFirmware string `json:"CE firmware version"` + GpuDMCUFirmware string `json:"DMCU firmware version"` + GpuMCFirmware string `json:"MC firmware version"` + GpuMEFirmware string `json:"ME firmware version"` + GpuMECFirmware string `json:"MEC firmware version"` + GpuMEC2Firmware string `json:"MEC2 firmware version"` + GpuPFPFirmware string `json:"PFP firmware version"` + GpuRLCFirmware string `json:"RLC firmware version"` + GpuRLCSRLC string `json:"RLC SRLC firmware version"` + GpuRLCSRLG string `json:"RLC SRLG firmware version"` + GpuRLCSRLS string `json:"RLC SRLS firmware version"` + GpuSDMAFirmware string `json:"SDMA firmware version"` + GpuSDMA2Firmware string `json:"SDMA2 firmware version"` + GpuSMCFirmware string `json:"SMC firmware version"` + GpuSOSFirmware string `json:"SOS firmware version"` + GpuTARAS string `json:"TA RAS firmware version"` + GpuTAXGMI string `json:"TA XGMI firmware version"` + GpuUVDFirmware string `json:"UVD firmware version"` + GpuVCEFirmware string `json:"VCE firmware version"` + GpuVCNFirmware string `json:"VCN firmware version"` + GpuCardSeries string `json:"Card series"` + GpuCardModel string `json:"Card model"` + GpuCardVendor string `json:"Card vendor"` + GpuCardSKU string `json:"Card SKU"` + GpuNUMANode string `json:"(Topology) Numa Node"` + GpuNUMAAffinity string `json:"(Topology) Numa Affinity"` + GpuVisVRAMTotalMemory string `json:"VIS_VRAM Total Memory (B)"` + GpuVisVRAMTotalUsedMemory string `json:"VIS_VRAM Total Used Memory (B)"` + GpuVRAMTotalMemory string `json:"VRAM Total Memory (B)"` + GpuVRAMTotalUsedMemory string `json:"VRAM Total Used Memory (B)"` + GpuGTTTotalMemory string `json:"GTT Total Memory (B)"` + GpuGTTTotalUsedMemory string `json:"GTT Total Used Memory (B)"` +} diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go new file mode 100644 index 0000000000000..e38e0ff89eae0 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -0,0 +1,90 @@ +package amd_rocm_smi + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGatherValidJSON(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "Vega 10 XT", + filename: "vega-10-XT.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x6861", + "gpu_unique_id": "0x2150e7d042a1124", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5925, + "fan_speed": 13, + "memory_total": int64(17163091968), + "memory_used": int64(17776640), + "memory_free": int64(17145315328), + "temperature_sensor_edge": 39.0, + "temperature_sensor_junction": 40.0, + "temperature_sensor_memory": 92.0, + "utilization_gpu": 0, + "clocks_current_sm": 1269, + "clocks_current_memory": 167, + "power_draw": 15.0, + }, + time.Unix(0, 0)), + }, + }, + { + name: "Vega 20 WKS GL-XE [Radeon Pro VII]", + filename: "vega-20-WKS-GL-XE.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + 
"gpu_id": "0x66a1", + "gpu_unique_id": "0x2f048617326b1ea", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5917, + "fan_speed": 0, + "memory_total": int64(34342961152), + "memory_used": int64(10850304), + "memory_free": int64(34332110848), + "temperature_sensor_edge": 36.0, + "temperature_sensor_junction": 38.0, + "temperature_sensor_memory": 35.0, + "utilization_gpu": 0, + "utilization_memory": 0, + "clocks_current_sm": 1725, + "clocks_current_memory": 1000, + "power_draw": 26.0, + }, + time.Unix(0, 0)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + err = gatherROCmSMI(octets, &acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json new file mode 100644 index 0000000000000..c4d51f5253a51 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json @@ -0,0 +1,77 @@ +{ + "card0": { + "GPU ID": "0x6861", + "Unique ID": "0x2150e7d042a1124", + "VBIOS version": "113-D0510100-106", + "Temperature (Sensor edge) (C)": "39.0", + "Temperature (Sensor junction) (C)": "40.0", + "Temperature (Sensor memory) (C)": "92.0", + "dcefclk clock speed:": "(600Mhz)", + "dcefclk clock level:": "0", + "mclk clock speed:": "(167Mhz)", + "mclk clock level:": "0", + "sclk clock speed:": "(1269Mhz)", + "sclk clock level:": "3", + "socclk clock speed:": "(960Mhz)", + "socclk clock level:": "3", + "pcie clock level": "1 (8.0GT/s x16)", + "sclk clock level": "3 (1269Mhz)", + "Fan speed (level)": "33", + "Fan speed (%)": "13", + "Fan RPM": "682", + "Performance Level": "auto", + "GPU OverDrive value (%)": "0", + "GPU Memory OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "170.0", + "Average Graphics Package Power (W)": "15.0", + "0": "8.0GT/s x16", + "1": "8.0GT/s x16 *", + "2": "847Mhz", + "3": "960Mhz *", + "4": "1028Mhz", + "5": "1107Mhz", + "6": "1440Mhz", + "7": "1500Mhz", + "GPU use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "N/A", + "Voltage (mV)": "906", + "PCI Bus": "0000:04:00.0", + "VRAM Total Memory (B)": "17163091968", + "VRAM Total Used Memory (B)": "17776640", + "VIS_VRAM Total Memory (B)": "268435456", + "VIS_VRAM Total Used Memory (B)": "13557760", + "GTT Total Memory (B)": "17163091968", + "GTT Total Used Memory (B)": "25608192", + "ASD firmware version": "553648152", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "163", + "MEC firmware version": "432", + "MEC2 firmware version": "432", + "PFP firmware version": "186", + "RLC firmware version": "93", + "RLC SRLC firmware version": "0", + "RLC SRLG firmware version": "0", + "RLC SRLS firmware version": "0", + "SDMA firmware version": "430", + "SDMA2 firmware version": "430", + "SMC firmware version": "00.28.54.00", + "SOS firmware version": "0x0008015d", + "TA RAS firmware version": "00.00.00.00", + "TA XGMI firmware version": "00.00.00.00", + "UVD firmware version": "0x422b1100", + "VCE firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card model": "0xc1e", + "Card vendor": "Advanced Micro Devices, Inc. 
[AMD/ATI]", + "Card SKU": "D05101", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.25" + } +} \ No newline at end of file diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json new file mode 100644 index 0000000000000..771565a607bd5 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json @@ -0,0 +1,165 @@ +{ + "card0": { + "GPU ID": "0x66a1", + "Unique ID": "0x2f048617326b1ea", + "VBIOS version": "113-D1631700-111", + "Temperature (Sensor edge) (C)": "36.0", + "Temperature (Sensor junction) (C)": "38.0", + "Temperature (Sensor memory) (C)": "35.0", + "dcefclk clock speed:": "(357Mhz)", + "dcefclk clock level:": "0", + "fclk clock speed:": "(1080Mhz)", + "fclk clock level:": "6", + "mclk clock speed:": "(1000Mhz)", + "mclk clock level:": "2", + "sclk clock speed:": "(1725Mhz)", + "sclk clock level:": "8", + "socclk clock speed:": "(971Mhz)", + "socclk clock level:": "7", + "pcie clock level": "1 (16.0GT/s x16)", + "sclk clock level": "8 (1725Mhz)", + "Fan speed (level)": "0", + "Fan speed (%)": "0", + "Fan RPM": "0", + "Performance Level": "high", + "GPU OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "225.0", + "Average Graphics Package Power (W)": "26.0", + "0": "2.5GT/s x16", + "1": "16.0GT/s x16 *", + "2": "566Mhz", + "3": "618Mhz", + "4": "680Mhz", + "5": "755Mhz", + "6": "850Mhz", + "7": "971Mhz *", + "8": "1725Mhz *", + "GPU use (%)": "0", + "GPU memory use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "692024000810", + "Voltage (mV)": "1000", + "PCI Bus": "0000:63:00.0", + "VRAM Total Memory (B)": "34342961152", + "VRAM Total Used Memory (B)": "10850304", + "VIS_VRAM Total Memory (B)": "34342961152", + "VIS_VRAM Total Used Memory (B)": "10850304", + "GTT Total Memory (B)": "54974742528", + "GTT Total Used Memory (B)": "11591680", + "ASD firmware version": "553648199", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "164", + "MEC firmware version": "448", + "MEC2 firmware version": "448", + "PFP firmware version": "188", + "RLC firmware version": "50", + "RLC SRLC firmware version": "1", + "RLC SRLG firmware version": "1", + "RLC SRLS firmware version": "1", + "SDMA firmware version": "144", + "SDMA2 firmware version": "144", + "SMC firmware version": "00.40.59.00", + "SOS firmware version": "0x00080b67", + "TA RAS firmware version": "27.00.01.36", + "TA XGMI firmware version": "32.00.00.02", + "UVD firmware version": "0x42002b13", + "VCE firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card series": "Radeon Instinct MI50 32GB", + "Card model": "0x834", + "Card vendor": "Advanced Micro Devices, Inc. 
[AMD/ATI]", + "Card SKU": "D16317", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.17", + "(Topology) Weight between DRM devices 0 and 1": "40", + "(Topology) Weight between DRM devices 0 and 2": "40", + "(Topology) Weight between DRM devices 0 and 3": "40", + "(Topology) Weight between DRM devices 0 and 4": "72", + "(Topology) Weight between DRM devices 0 and 5": "72", + "(Topology) Weight between DRM devices 0 and 6": "72", + "(Topology) Weight between DRM devices 0 and 7": "72", + "(Topology) Weight between DRM devices 1 and 2": "40", + "(Topology) Weight between DRM devices 1 and 3": "40", + "(Topology) Weight between DRM devices 1 and 4": "72", + "(Topology) Weight between DRM devices 1 and 5": "72", + "(Topology) Weight between DRM devices 1 and 6": "72", + "(Topology) Weight between DRM devices 1 and 7": "72", + "(Topology) Weight between DRM devices 2 and 3": "40", + "(Topology) Weight between DRM devices 2 and 4": "72", + "(Topology) Weight between DRM devices 2 and 5": "72", + "(Topology) Weight between DRM devices 2 and 6": "72", + "(Topology) Weight between DRM devices 2 and 7": "72", + "(Topology) Weight between DRM devices 3 and 4": "72", + "(Topology) Weight between DRM devices 3 and 5": "72", + "(Topology) Weight between DRM devices 3 and 6": "72", + "(Topology) Weight between DRM devices 3 and 7": "72", + "(Topology) Weight between DRM devices 4 and 5": "40", + "(Topology) Weight between DRM devices 4 and 6": "40", + "(Topology) Weight between DRM devices 4 and 7": "40", + "(Topology) Weight between DRM devices 5 and 6": "40", + "(Topology) Weight between DRM devices 5 and 7": "40", + "(Topology) Weight between DRM devices 6 and 7": "40", + "(Topology) Hops between DRM devices 0 and 1": "2", + "(Topology) Hops between DRM devices 0 and 2": "2", + "(Topology) Hops between DRM devices 0 and 3": "2", + "(Topology) Hops between DRM devices 0 and 4": "3", + "(Topology) Hops between DRM devices 0 and 5": "3", + "(Topology) Hops between DRM devices 0 and 6": "3", + "(Topology) Hops between DRM devices 0 and 7": "3", + "(Topology) Hops between DRM devices 1 and 2": "2", + "(Topology) Hops between DRM devices 1 and 3": "2", + "(Topology) Hops between DRM devices 1 and 4": "3", + "(Topology) Hops between DRM devices 1 and 5": "3", + "(Topology) Hops between DRM devices 1 and 6": "3", + "(Topology) Hops between DRM devices 1 and 7": "3", + "(Topology) Hops between DRM devices 2 and 3": "2", + "(Topology) Hops between DRM devices 2 and 4": "3", + "(Topology) Hops between DRM devices 2 and 5": "3", + "(Topology) Hops between DRM devices 2 and 6": "3", + "(Topology) Hops between DRM devices 2 and 7": "3", + "(Topology) Hops between DRM devices 3 and 4": "3", + "(Topology) Hops between DRM devices 3 and 5": "3", + "(Topology) Hops between DRM devices 3 and 6": "3", + "(Topology) Hops between DRM devices 3 and 7": "3", + "(Topology) Hops between DRM devices 4 and 5": "2", + "(Topology) Hops between DRM devices 4 and 6": "2", + "(Topology) Hops between DRM devices 4 and 7": "2", + "(Topology) Hops between DRM devices 5 and 6": "2", + "(Topology) Hops between DRM devices 5 and 7": "2", + "(Topology) Hops between DRM devices 6 and 7": "2", + "(Topology) Link type between DRM devices 0 and 1": "PCIE", + "(Topology) Link type between DRM devices 0 and 2": "PCIE", + "(Topology) Link type between DRM devices 0 and 3": "PCIE", + "(Topology) Link type between DRM devices 0 and 4": "PCIE", + "(Topology) Link type between DRM devices 0 
and 5": "PCIE", + "(Topology) Link type between DRM devices 0 and 6": "PCIE", + "(Topology) Link type between DRM devices 0 and 7": "PCIE", + "(Topology) Link type between DRM devices 1 and 2": "PCIE", + "(Topology) Link type between DRM devices 1 and 3": "PCIE", + "(Topology) Link type between DRM devices 1 and 4": "PCIE", + "(Topology) Link type between DRM devices 1 and 5": "PCIE", + "(Topology) Link type between DRM devices 1 and 6": "PCIE", + "(Topology) Link type between DRM devices 1 and 7": "PCIE", + "(Topology) Link type between DRM devices 2 and 3": "PCIE", + "(Topology) Link type between DRM devices 2 and 4": "PCIE", + "(Topology) Link type between DRM devices 2 and 5": "PCIE", + "(Topology) Link type between DRM devices 2 and 6": "PCIE", + "(Topology) Link type between DRM devices 2 and 7": "PCIE", + "(Topology) Link type between DRM devices 3 and 4": "PCIE", + "(Topology) Link type between DRM devices 3 and 5": "PCIE", + "(Topology) Link type between DRM devices 3 and 6": "PCIE", + "(Topology) Link type between DRM devices 3 and 7": "PCIE", + "(Topology) Link type between DRM devices 4 and 5": "PCIE", + "(Topology) Link type between DRM devices 4 and 6": "PCIE", + "(Topology) Link type between DRM devices 4 and 7": "PCIE", + "(Topology) Link type between DRM devices 5 and 6": "PCIE", + "(Topology) Link type between DRM devices 5 and 7": "PCIE", + "(Topology) Link type between DRM devices 6 and 7": "PCIE" + } +} \ No newline at end of file diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 8ef6d6fe2a8e9..ff417eb26b67c 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -43,7 +43,7 @@ The following defaults are known to work with RabbitMQ: # exchange_arguments = { } # exchange_arguments = {"hash_property" = "timestamp"} - ## AMQP queue name + ## AMQP queue name. queue = "telegraf" ## AMQP queue durability can be "transient" or "durable". 
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index d98b1c19f4ab3..abe86bc385515 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -9,12 +9,13 @@ import ( "sync" "time" + "github.com/streadway/amqp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/streadway/amqp" ) const ( @@ -71,7 +72,7 @@ func (a *externalAuth) Mechanism() string { return "EXTERNAL" } func (a *externalAuth) Response() string { - return fmt.Sprintf("\000") + return "\000" } const ( @@ -183,7 +184,7 @@ func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error { func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { // make new tls config - tls, err := a.ClientConfig.TLSConfig() + tlsCfg, err := a.ClientConfig.TLSConfig() if err != nil { return nil, err } @@ -201,7 +202,7 @@ func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { } config := amqp.Config{ - TLSClientConfig: tls, + TLSClientConfig: tlsCfg, SASL: auth, // if nil, it will be PLAIN } return &config, nil @@ -288,16 +289,13 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err ch, err := a.conn.Channel() if err != nil { - return nil, fmt.Errorf("Failed to open a channel: %s", err.Error()) + return nil, fmt.Errorf("failed to open a channel: %s", err.Error()) } if a.Exchange != "" { - var exchangeDurable = true - switch a.ExchangeDurability { - case "transient": + exchangeDurable := true + if a.ExchangeDurability == "transient" { exchangeDurable = false - default: - exchangeDurable = true } exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) @@ -305,11 +303,8 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err exchangeArgs[k] = v } - err = declareExchange( + err = a.declareExchange( ch, - a.Exchange, - a.ExchangeType, - a.ExchangePassive, exchangeDurable, exchangeArgs) if err != nil { @@ -317,11 +312,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err } } - q, err := declareQueue( - ch, - a.Queue, - a.QueueDurability, - a.QueuePassive) + q, err := a.declareQueue(ch) if err != nil { return nil, err } @@ -335,7 +326,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err nil, ) if err != nil { - return nil, fmt.Errorf("Failed to bind a queue: %s", err) + return nil, fmt.Errorf("failed to bind a queue: %s", err) } } @@ -345,7 +336,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err false, // global ) if err != nil { - return nil, fmt.Errorf("Failed to set QoS: %s", err) + return nil, fmt.Errorf("failed to set QoS: %s", err) } msgs, err := ch.Consume( @@ -358,25 +349,22 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err nil, // arguments ) if err != nil { - return nil, fmt.Errorf("Failed establishing connection to queue: %s", err) + return nil, fmt.Errorf("failed establishing connection to queue: %s", err) } return msgs, err } -func declareExchange( +func (a *AMQPConsumer) declareExchange( channel *amqp.Channel, - exchangeName string, - exchangeType string, - exchangePassive bool, exchangeDurable bool, exchangeArguments amqp.Table, ) error { var err error - if exchangePassive { + if a.ExchangePassive { err = channel.ExchangeDeclarePassive( - 
exchangeName, - exchangeType, + a.Exchange, + a.ExchangeType, exchangeDurable, false, // delete when unused false, // internal @@ -385,8 +373,8 @@ func declareExchange( ) } else { err = channel.ExchangeDeclare( - exchangeName, - exchangeType, + a.Exchange, + a.ExchangeType, exchangeDurable, false, // delete when unused false, // internal @@ -395,31 +383,23 @@ func declareExchange( ) } if err != nil { - return fmt.Errorf("Error declaring exchange: %v", err) + return fmt.Errorf("error declaring exchange: %v", err) } return nil } -func declareQueue( - channel *amqp.Channel, - queueName string, - queueDurability string, - queuePassive bool, -) (*amqp.Queue, error) { +func (a *AMQPConsumer) declareQueue(channel *amqp.Channel) (*amqp.Queue, error) { var queue amqp.Queue var err error - var queueDurable = true - switch queueDurability { - case "transient": + queueDurable := true + if a.QueueDurability == "transient" { queueDurable = false - default: - queueDurable = true } - if queuePassive { + if a.QueuePassive { queue, err = channel.QueueDeclarePassive( - queueName, // queue + a.Queue, // queue queueDurable, // durable false, // delete when unused false, // exclusive @@ -428,7 +408,7 @@ func declareQueue( ) } else { queue, err = channel.QueueDeclare( - queueName, // queue + a.Queue, // queue queueDurable, // durable false, // delete when unused false, // exclusive @@ -437,7 +417,7 @@ func declareQueue( ) } if err != nil { - return nil, fmt.Errorf("Error declaring queue: %v", err) + return nil, fmt.Errorf("error declaring queue: %v", err) } return &queue, nil } diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index ff7341b838f75..9b9059ac8d48a 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -12,7 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -21,7 +21,7 @@ type Apache struct { Urls []string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *http.Client @@ -62,12 +62,12 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { if len(n.Urls) == 0 { n.Urls = []string{"http://localhost/server-status?auto"} } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -77,14 +77,14 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -92,7 +92,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Apache) createHttpClient() (*http.Client, error) { +func (n *Apache) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -102,16 +102,16 @@ func (n *Apache) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ 
TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } -func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { req, err := http.NewRequest("GET", addr.String(), nil) if err != nil { - return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err) + return fmt.Errorf("error on new request to %s : %s", addr.String(), err) } if len(n.Username) != 0 && len(n.Password) != 0 { @@ -120,7 +120,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Do(req) if err != nil { - return fmt.Errorf("error on request to %s : %s\n", addr.String(), err) + return fmt.Errorf("error on request to %s : %s", addr.String(), err) } defer resp.Body.Close() @@ -158,32 +158,31 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { } func (n *Apache) gatherScores(data string) map[string]interface{} { - var waiting, open int = 0, 0 - var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 + var waiting, open = 0, 0 + var s, r, w, k, d, c, l, g, i = 0, 0, 0, 0, 0, 0, 0, 0, 0 - for _, s := range strings.Split(data, "") { - - switch s { + for _, str := range strings.Split(data, "") { + switch str { case "_": waiting++ case "S": - S++ + s++ case "R": - R++ + r++ case "W": - W++ + w++ case "K": - K++ + k++ case "D": - D++ + d++ case "C": - C++ + c++ case "L": - L++ + l++ case "G": - G++ + g++ case "I": - I++ + i++ case ".": open++ } @@ -191,15 +190,15 @@ func (n *Apache) gatherScores(data string) map[string]interface{} { fields := map[string]interface{}{ "scboard_waiting": float64(waiting), - "scboard_starting": float64(S), - "scboard_reading": float64(R), - "scboard_sending": float64(W), - "scboard_keepalive": float64(K), - "scboard_dnslookup": float64(D), - "scboard_closing": float64(C), - "scboard_logging": float64(L), - "scboard_finishing": float64(G), - "scboard_idle_cleanup": float64(I), + "scboard_starting": float64(s), + "scboard_reading": float64(r), + "scboard_sending": float64(w), + "scboard_keepalive": float64(k), + "scboard_dnslookup": float64(d), + "scboard_closing": float64(c), + "scboard_logging": float64(l), + "scboard_finishing": float64(g), + "scboard_idle_cleanup": float64(i), "scboard_open": float64(open), } return fields diff --git a/plugins/inputs/apache/apache_test.go b/plugins/inputs/apache/apache_test.go index ca8f4733c6bc5..534f6f9e1f7e9 100644 --- a/plugins/inputs/apache/apache_test.go +++ b/plugins/inputs/apache/apache_test.go @@ -31,7 +31,8 @@ Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RR func TestHTTPApache(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, apacheStatus) + _, err := fmt.Fprintln(w, apacheStatus) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index a862bbfc881f8..2cb7522984119 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -7,19 +7,20 @@ import ( "strings" "time" + apcupsdClient "github.com/mdlayher/apcupsd" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/mdlayher/apcupsd" ) const defaultAddress = "tcp://127.0.0.1:3551" -var 
defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)} +var defaultTimeout = config.Duration(5 * time.Second) type ApcUpsd struct { Servers []string - Timeout internal.Duration + Timeout config.Duration } func (*ApcUpsd) Description() string { @@ -42,60 +43,67 @@ func (*ApcUpsd) SampleConfig() string { func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { ctx := context.Background() - for _, addr := range h.Servers { - addrBits, err := url.Parse(addr) - if err != nil { - return err - } - if addrBits.Scheme == "" { - addrBits.Scheme = "tcp" - } - - ctx, cancel := context.WithTimeout(ctx, h.Timeout.Duration) - defer cancel() + for _, server := range h.Servers { + err := func(address string) error { + addrBits, err := url.Parse(address) + if err != nil { + return err + } + if addrBits.Scheme == "" { + addrBits.Scheme = "tcp" + } + + ctx, cancel := context.WithTimeout(ctx, time.Duration(h.Timeout)) + defer cancel() + + status, err := fetchStatus(ctx, addrBits) + if err != nil { + return err + } + + tags := map[string]string{ + "serial": status.SerialNumber, + "ups_name": status.UPSName, + "status": status.Status, + "model": status.Model, + } + + flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "status_flags": flags, + "input_voltage": status.LineVoltage, + "load_percent": status.LoadPercent, + "battery_charge_percent": status.BatteryChargePercent, + "time_left_ns": status.TimeLeft.Nanoseconds(), + "output_voltage": status.OutputVoltage, + "internal_temp": status.InternalTemp, + "battery_voltage": status.BatteryVoltage, + "input_frequency": status.LineFrequency, + "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + "nominal_input_voltage": status.NominalInputVoltage, + "nominal_battery_voltage": status.NominalBatteryVoltage, + "nominal_power": status.NominalPower, + "firmware": status.Firmware, + "battery_date": status.BatteryDate, + } + + acc.AddFields("apcupsd", fields, tags) + return nil + }(server) - status, err := fetchStatus(ctx, addrBits) if err != nil { return err } - - tags := map[string]string{ - "serial": status.SerialNumber, - "ups_name": status.UPSName, - "status": status.Status, - "model": status.Model, - } - - flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) - if err != nil { - return err - } - - fields := map[string]interface{}{ - "status_flags": flags, - "input_voltage": status.LineVoltage, - "load_percent": status.LoadPercent, - "battery_charge_percent": status.BatteryChargePercent, - "time_left_ns": status.TimeLeft.Nanoseconds(), - "output_voltage": status.OutputVoltage, - "internal_temp": status.InternalTemp, - "battery_voltage": status.BatteryVoltage, - "input_frequency": status.LineFrequency, - "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), - "nominal_input_voltage": status.NominalInputVoltage, - "nominal_battery_voltage": status.NominalBatteryVoltage, - "nominal_power": status.NominalPower, - "firmware": status.Firmware, - "battery_date": status.BatteryDate, - } - - acc.AddFields("apcupsd", fields, tags) } return nil } -func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) { - client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host) +func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsdClient.Status, error) { + client, err := apcupsdClient.DialContext(ctx, addr.Scheme, addr.Host) if err != nil { return nil, err } diff --git 
a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index e749d5137daba..f21c5a4c4ce94 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -7,12 +7,13 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) -func TestApcupsdDocs(t *testing.T) { +func TestApcupsdDocs(_ *testing.T) { apc := &ApcUpsd{} apc.Description() apc.SampleConfig() @@ -35,31 +36,33 @@ func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) { } go func() { - for ctx.Err() == nil { - defer ln.Close() - - conn, err := ln.Accept() - if err != nil { - continue - } - defer conn.Close() - conn.SetReadDeadline(time.Now().Add(time.Second)) + defer ln.Close() - in := make([]byte, 128) - n, err := conn.Read(in) - require.NoError(t, err, "failed to read from connection") - - status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} - want, got := status, in[:n] - require.Equal(t, want, got) - - // Run against test function and append EOF to end of output bytes - out = append(out, []byte{0, 0}) - - for _, o := range out { - _, err := conn.Write(o) - require.NoError(t, err, "failed to write to connection") - } + for ctx.Err() == nil { + func() { + conn, err := ln.Accept() + if err != nil { + return + } + defer conn.Close() + require.NoError(t, conn.SetReadDeadline(time.Now().Add(time.Second))) + + in := make([]byte, 128) + n, err := conn.Read(in) + require.NoError(t, err, "failed to read from connection") + + status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} + want, got := status, in[:n] + require.Equal(t, want, got) + + // Run against test function and append EOF to end of output bytes + out = append(out, []byte{0, 0}) + + for _, o := range out { + _, err := conn.Write(o) + require.NoError(t, err, "failed to write to connection") + } + }() } }() @@ -102,7 +105,6 @@ func TestConfig(t *testing.T) { } }) } - } func TestApcupsdGather(t *testing.T) { @@ -138,9 +140,9 @@ func TestApcupsdGather(t *testing.T) { "time_on_battery_ns": int64(0), "nominal_input_voltage": float64(230), "nominal_battery_voltage": float64(12), - "nominal_power": int(865), - "firmware": string("857.L3 .I USB FW:L3"), - "battery_date": time.Date(2016, time.September, 06, 0, 0, 0, 0, time.UTC), + "nominal_power": 865, + "firmware": "857.L3 .I USB FW:L3", + "battery_date": "2016-09-06", }, out: genOutput, }, @@ -155,7 +157,6 @@ func TestApcupsdGather(t *testing.T) { ) for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) @@ -207,6 +208,7 @@ func genOutput() [][]byte { "NOMBATTV : 12.0 Volts", "NOMPOWER : 865 Watts", "FIRMWARE : 857.L3 .I USB FW:L3", + "ALARMDEL : Low Battery", } var out [][]byte diff --git a/plugins/inputs/aurora/aurora.go b/plugins/inputs/aurora/aurora.go index fc6f82aadda17..45a2fabb6249a 100644 --- a/plugins/inputs/aurora/aurora.go +++ b/plugins/inputs/aurora/aurora.go @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -43,11 +43,11 @@ var ( type Vars map[string]interface{} type Aurora struct { - Schedulers []string `toml:"schedulers"` - Roles []string `toml:"roles"` - Timeout internal.Duration `toml:"timeout"` - Username string 
`toml:"username"` - Password string `toml:"password"` + Schedulers []string `toml:"schedulers"` + Roles []string `toml:"roles"` + Timeout config.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig client *http.Client @@ -95,7 +95,7 @@ func (a *Aurora) Gather(acc telegraf.Accumulator) error { } } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() var wg sync.WaitGroup @@ -147,8 +147,8 @@ func (a *Aurora) initialize() error { urls = append(urls, loc) } - if a.Timeout.Duration < time.Second { - a.Timeout.Duration = defaultTimeout + if a.Timeout < config.Duration(time.Second) { + a.Timeout = config.Duration(defaultTimeout) } if len(a.Roles) == 0 { @@ -190,7 +190,9 @@ func (a *Aurora) gatherRole(ctx context.Context, origin *url.URL) (RoleType, err if err != nil { return Unknown, err } - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return Unknown, fmt.Errorf("closing body failed: %v", err) + } switch resp.StatusCode { case http.StatusOK: diff --git a/plugins/inputs/aurora/aurora_test.go b/plugins/inputs/aurora/aurora_test.go index 6e2c004f2e7b3..e22488929e545 100644 --- a/plugins/inputs/aurora/aurora_test.go +++ b/plugins/inputs/aurora/aurora_test.go @@ -46,7 +46,8 @@ func TestAurora(t *testing.T) { "variable_scrape_micros_total_per_sec": 1485.0 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -86,7 +87,8 @@ func TestAurora(t *testing.T) { }, varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte("{}")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -104,7 +106,8 @@ func TestAurora(t *testing.T) { "foo": "bar" }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -123,7 +126,8 @@ func TestAurora(t *testing.T) { "foo": 1e309 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -142,7 +146,8 @@ func TestAurora(t *testing.T) { "foo": 9223372036854775808 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -158,7 +163,8 @@ func TestAurora(t *testing.T) { varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body := `{]` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -176,7 +182,8 @@ func TestAurora(t *testing.T) { "value": 42 }` w.WriteHeader(http.StatusServiceUnavailable) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -244,7 +251,8 @@ func TestBasicAuth(t *testing.T) { require.Equal(t, 
tt.username, username) require.Equal(t, tt.password, password) w.WriteHeader(http.StatusOK) - w.Write([]byte("{}")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) }) var acc testutil.Accumulator diff --git a/plugins/inputs/bcache/README.md b/plugins/inputs/bcache/README.md index 11d567ec5616b..88c9f14f9236a 100644 --- a/plugins/inputs/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -56,15 +56,15 @@ cache_readaheads Using this configuration: ```toml -[bcache] - # Bcache sets path - # If not specified, then default is: - # bcachePath = "/sys/fs/bcache" - # - # By default, telegraf gather stats for all bcache devices - # Setting devices will restrict the stats to the specified - # bcache devices. - # bcacheDevs = ["bcache0", ...] +[[inputs.bcache]] + ## Bcache sets path + ## If not specified, then default is: + bcachePath = "/sys/fs/bcache" + + ## By default, Telegraf gather stats for all bcache devices + ## Setting devices will restrict the stats to the specified + ## bcache devices. + bcacheDevs = ["bcache0"] ``` When run with: diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 849e6dd37de0d..84eb3262fdf28 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -1,8 +1,13 @@ +//go:build !windows +// +build !windows + +// bcache doesn't aim for Windows + package bcache import ( "errors" - "io/ioutil" + "fmt" "os" "path/filepath" "strconv" @@ -22,7 +27,7 @@ var sampleConfig = ` ## If not specified, then default is: bcachePath = "/sys/fs/bcache" - ## By default, telegraf gather stats for all bcache devices + ## By default, Telegraf gather stats for all bcache devices ## Setting devices will restrict the stats to the specified ## bcache devices. bcacheDevs = ["bcache0"] @@ -79,7 +84,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { if len(metrics) == 0 { return errors.New("can't read any stats file") } - file, err := ioutil.ReadFile(bdev + "/dirty_data") + file, err := os.ReadFile(bdev + "/dirty_data") if err != nil { return err } @@ -91,7 +96,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { for _, path := range metrics { key := filepath.Base(path) - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) rawValue := strings.TrimSpace(string(file)) if err != nil { return err @@ -124,7 +129,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { } bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*") if len(bdevs) < 1 { - return errors.New("Can't find any bcache device") + return errors.New("can't find any bcache device") } for _, bdev := range bdevs { if restrictDevs { @@ -133,7 +138,9 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { continue } } - b.gatherBcache(bdev, acc) + if err := b.gatherBcache(bdev, acc); err != nil { + return fmt.Errorf("gathering bcache failed: %v", err) + } } return nil } diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index bd191528fd014..4c62e0f014f14 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -1,7 +1,9 @@ +//go:build !windows +// +build !windows + package bcache import ( - "io/ioutil" "os" "testing" @@ -10,26 +12,26 @@ import ( ) const ( - dirty_data = "1.5G" - bypassed = "4.7T" - cache_bypass_hits = "146155333" - cache_bypass_misses = "0" - cache_hit_ratio = "90" - cache_hits = "511469583" - cache_miss_collisions = "157567" - cache_misses = "50616331" - cache_readaheads = "2" + dirtyData = 
"1.5G" + bypassed = "4.7T" + cacheBypassHits = "146155333" + cacheBypassMisses = "0" + cacheHitRatio = "90" + cacheHits = "511469583" + cacheMissCollisions = "157567" + cacheMisses = "50616331" + cacheReadaheads = "2" ) var ( testBcachePath = os.TempDir() + "/telegraf/sys/fs/bcache" - testBcacheUuidPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411" + testBcacheUUIDPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411" testBcacheDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0" testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10" ) func TestBcacheGeneratesMetrics(t *testing.T) { - err := os.MkdirAll(testBcacheUuidPath, 0755) + err := os.MkdirAll(testBcacheUUIDPath, 0755) require.NoError(t, err) err = os.MkdirAll(testBcacheDevPath, 0755) @@ -38,49 +40,49 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755) require.NoError(t, err) - err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0") + err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUUIDPath+"/bdev0") require.NoError(t, err) - err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev") + err = os.Symlink(testBcacheDevPath, testBcacheUUIDPath+"/bdev0/dev") require.NoError(t, err) - err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755) + err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", - []byte(dirty_data), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", + []byte(dirtyData), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", - []byte(cache_bypass_hits), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", + []byte(cacheBypassHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", - []byte(cache_bypass_misses), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", + []byte(cacheBypassMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", - []byte(cache_hit_ratio), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", + []byte(cacheHitRatio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", - []byte(cache_hits), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", + []byte(cacheHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", - []byte(cache_miss_collisions), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", + []byte(cacheMissCollisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", - []byte(cache_misses), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", + []byte(cacheMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", - []byte(cache_readaheads), 0644) + err = 
os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", + []byte(cacheReadaheads), 0644) require.NoError(t, err) fields := map[string]interface{}{ diff --git a/plugins/inputs/bcache/bcache_windows.go b/plugins/inputs/bcache/bcache_windows.go new file mode 100644 index 0000000000000..faeba8888bb3b --- /dev/null +++ b/plugins/inputs/bcache/bcache_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package bcache diff --git a/plugins/inputs/beanstalkd/beanstalkd.go b/plugins/inputs/beanstalkd/beanstalkd.go index 932edd301f910..b8a5c97974eef 100644 --- a/plugins/inputs/beanstalkd/beanstalkd.go +++ b/plugins/inputs/beanstalkd/beanstalkd.go @@ -62,7 +62,10 @@ func (b *Beanstalkd) Gather(acc telegraf.Accumulator) error { for _, tube := range tubes { wg.Add(1) go func(tube string) { - b.gatherTubeStats(connection, tube, acc) + err := b.gatherTubeStats(connection, tube, acc) + if err != nil { + acc.AddError(err) + } wg.Done() }(tube) } @@ -128,7 +131,7 @@ func (b *Beanstalkd) gatherServerStats(connection *textproto.Conn, acc telegraf. }, map[string]string{ "hostname": stats.Hostname, - "id": stats.Id, + "id": stats.ID, "server": b.Server, "version": stats.Version, }, @@ -169,13 +172,13 @@ func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, ac } func runQuery(connection *textproto.Conn, cmd string, result interface{}) error { - requestId, err := connection.Cmd(cmd) + requestID, err := connection.Cmd(cmd) if err != nil { return err } - connection.StartResponse(requestId) - defer connection.EndResponse(requestId) + connection.StartResponse(requestID) + defer connection.EndResponse(requestID) status, err := connection.ReadLine() if err != nil { @@ -240,7 +243,7 @@ type statsResponse struct { CurrentWaiting int `yaml:"current-waiting"` CurrentWorkers int `yaml:"current-workers"` Hostname string `yaml:"hostname"` - Id string `yaml:"id"` + ID string `yaml:"id"` JobTimeouts int `yaml:"job-timeouts"` MaxJobSize int `yaml:"max-job-size"` Pid int `yaml:"pid"` diff --git a/plugins/inputs/beanstalkd/beanstalkd_test.go b/plugins/inputs/beanstalkd/beanstalkd_test.go index 92c108e06aa91..9d97a682c4873 100644 --- a/plugins/inputs/beanstalkd/beanstalkd_test.go +++ b/plugins/inputs/beanstalkd/beanstalkd_test.go @@ -22,6 +22,7 @@ func TestBeanstalkd(t *testing.T) { tubesConfig []string expectedTubes []tubeStats notExpectedTubes []tubeStats + expectedError string }{ { name: "All tubes stats", @@ -50,15 +51,14 @@ func TestBeanstalkd(t *testing.T) { {name: "default", fields: defaultTubeFields}, {name: "test", fields: testTubeFields}, }, + expectedError: "input does not match format", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { server, err := startTestServer(t) - if err != nil { - t.Fatalf("Unable to create test server") - } + require.NoError(t, err, "Unable to create test server") defer server.Close() serverAddress := server.Addr().String() @@ -68,8 +68,13 @@ func TestBeanstalkd(t *testing.T) { } var acc testutil.Accumulator - require.NoError(t, acc.GatherError(plugin.Gather)) - + err = acc.GatherError(plugin.Gather) + if test.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Equal(t, test.expectedError, err.Error()) + } acc.AssertContainsTaggedFields(t, "beanstalkd_overview", overviewFields, getOverviewTags(serverAddress), @@ -110,8 +115,8 @@ func startTestServer(t *testing.T) (net.Listener, error) { tp := textproto.NewConn(connection) defer tp.Close() - sendSuccessResponse := func(body 
string) { - tp.PrintfLine("OK %d\r\n%s", len(body), body) + sendSuccessResponse := func(body string) error { + return tp.PrintfLine("OK %d\r\n%s", len(body), body) } for { @@ -125,15 +130,30 @@ func startTestServer(t *testing.T) (net.Listener, error) { switch cmd { case "list-tubes": - sendSuccessResponse(listTubesResponse) + if err := sendSuccessResponse(listTubesResponse); err != nil { + t.Logf("sending response %q failed: %v", listTubesResponse, err) + return + } case "stats": - sendSuccessResponse(statsResponse) + if err := sendSuccessResponse(statsResponse); err != nil { + t.Logf("sending response %q failed: %v", statsResponse, err) + return + } case "stats-tube default": - sendSuccessResponse(statsTubeDefaultResponse) + if err := sendSuccessResponse(statsTubeDefaultResponse); err != nil { + t.Logf("sending response %q failed: %v", statsTubeDefaultResponse, err) + return + } case "stats-tube test": - sendSuccessResponse(statsTubeTestResponse) + if err := sendSuccessResponse(statsTubeTestResponse); err != nil { + t.Logf("sending response %q failed: %v", statsTubeTestResponse, err) + return + } case "stats-tube unknown": - tp.PrintfLine("NOT_FOUND") + if err := tp.PrintfLine("NOT_FOUND"); err != nil { + t.Logf("sending response %q failed: %v", "NOT_FOUND", err) + return + } default: t.Log("Test server: unknown command") } diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md new file mode 100644 index 0000000000000..d819b5ab950b8 --- /dev/null +++ b/plugins/inputs/beat/README.md @@ -0,0 +1,143 @@ +# Beat Input Plugin +The Beat plugin collects metrics from the given Beat instances. It is +known to work with Filebeat and Kafkabeat. +### Configuration: +```toml + ## A URL from which to read Beat-formatted JSON + ## Default is "http://127.0.0.1:5066". + url = "http://127.0.0.1:5066" + + ## Enable collection of the listed stats + ## An empty list means collect all. Available options are currently + ## "beat", "libbeat", "system" and "filebeat". 
+ # include = ["beat", "libbeat", "filebeat"] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` +### Measurements & Fields +- **beat** + * Fields: + - cpu_system_ticks + - cpu_system_time_ms + - cpu_total_ticks + - cpu_total_time_ms + - cpu_total_value + - cpu_user_ticks + - cpu_user_time_ms + - info_uptime_ms + - memstats_gc_next + - memstats_memory_alloc + - memstats_memory_total + - memstats_rss + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_filebeat** + * Fields: + - events_active + - events_added + - events_done + - harvester_closed + - harvester_open_files + - harvester_running + - harvester_skipped + - harvester_started + - input_log_files_renamed + - input_log_files_truncated + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_libbeat** + * Fields: + - config_module_running + - config_module_starts + - config_module_stops + - config_reloads + - output_events_acked + - output_events_active + - output_events_batches + - output_events_dropped + - output_events_duplicates + - output_events_failed + - output_events_total + - output_type + - output_read_bytes + - output_read_errors + - output_write_bytes + - output_write_errors + - outputs_kafka_bytes_read + - outputs_kafka_bytes_write + - pipeline_clients + - pipeline_events_active + - pipeline_events_dropped + - pipeline_events_failed + - pipeline_events_filtered + - pipeline_events_published + - pipeline_events_retry + - pipeline_events_total + - pipeline_queue_acked + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_system** + * Field: + - cpu_cores + - load_1 + - load_15 + - load_5 + - load_norm_1 + - load_norm_15 + - load_norm_5 + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +### Example Output: +``` +$ telegraf --input-filter beat --test + +> beat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + cpu_system_ticks=656750,cpu_system_time_ms=656750,cpu_total_ticks=5461190,cpu_total_time_ms=5461198,cpu_total_value=5461190,cpu_user_ticks=4804440,cpu_user_time_ms=4804448,info_uptime_ms=342634196,memstats_gc_next=20199584,memstats_memory_alloc=12547424,memstats_memory_total=486296424792,memstats_rss=72552448 1540316047000000000 +> beat_libbeat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + 
config_module_running=0,config_module_starts=0,config_module_stops=0,config_reloads=0,output_events_acked=192404,output_events_active=0,output_events_batches=1607,output_events_dropped=0,output_events_duplicates=0,output_events_failed=0,output_events_total=192404,output_read_bytes=0,output_read_errors=0,output_write_bytes=0,output_write_errors=0,outputs_kafka_bytes_read=1118528,outputs_kafka_bytes_write=48002014,pipeline_clients=1,pipeline_events_active=0,pipeline_events_dropped=0,pipeline_events_failed=0,pipeline_events_filtered=11496,pipeline_events_published=192404,pipeline_events_retry=14,pipeline_events_total=203900,pipeline_queue_acked=192404 1540316047000000000 > beat_system,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + cpu_cores=32,load_1=46.08,load_15=49.82,load_5=47.88,load_norm_1=1.44,load_norm_15=1.5569,load_norm_5=1.4963 1540316047000000000 > beat_filebeat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + events_active=0,events_added=3223,events_done=3223,harvester_closed=0,harvester_open_files=0,harvester_running=0,harvester_skipped=0,harvester_started=0,input_log_files_renamed=0,input_log_files_truncated=0 1540320286000000000 ``` diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go new file mode 100644 index 0000000000000..08b5c3851d6c0 --- /dev/null +++ b/plugins/inputs/beat/beat.go @@ -0,0 +1,233 @@ +package beat + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +const sampleConfig = ` + ## A URL from which to read Beat-formatted JSON + ## Default is "http://127.0.0.1:5066". + url = "http://127.0.0.1:5066" + + ## Enable collection of the listed stats + ## An empty list means collect all. Available options are currently + ## "beat", "libbeat", "system" and "filebeat". 
+ # include = ["beat", "libbeat", "filebeat"] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +const description = "Read metrics exposed by Beat" + +const suffixInfo = "/" +const suffixStats = "/stats" + +type Info struct { + Beat string `json:"beat"` + Hostname string `json:"hostname"` + Name string `json:"name"` + UUID string `json:"uuid"` + Version string `json:"version"` +} + +type Stats struct { + Beat map[string]interface{} `json:"beat"` + FileBeat interface{} `json:"filebeat"` + Libbeat interface{} `json:"libbeat"` + System interface{} `json:"system"` +} + +type Beat struct { + URL string `toml:"url"` + + Includes []string `toml:"include"` + + Username string `toml:"username"` + Password string `toml:"password"` + Method string `toml:"method"` + Headers map[string]string `toml:"headers"` + HostHeader string `toml:"host_header"` + Timeout config.Duration `toml:"timeout"` + + tls.ClientConfig + client *http.Client +} + +func NewBeat() *Beat { + return &Beat{ + URL: "http://127.0.0.1:5066", + Includes: []string{"beat", "libbeat", "filebeat"}, + Method: "GET", + Headers: make(map[string]string), + Timeout: config.Duration(time.Second * 5), + } +} + +func (beat *Beat) Init() error { + availableStats := []string{"beat", "libbeat", "system", "filebeat"} + + var err error + beat.client, err = beat.createHTTPClient() + + if err != nil { + return err + } + + err = choice.CheckSlice(beat.Includes, availableStats) + if err != nil { + return err + } + + return nil +} + +func (beat *Beat) Description() string { + return description +} + +func (beat *Beat) SampleConfig() string { + return sampleConfig +} + +// createHTTPClient create a clients to access API +func (beat *Beat) createHTTPClient() (*http.Client, error) { + tlsConfig, err := beat.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: time.Duration(beat.Timeout), + } + + return client, nil +} + +// gatherJSONData query the data source and parse the response JSON +func (beat *Beat) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest(beat.Method, address, nil) + if err != nil { + return err + } + + if beat.Username != "" { + request.SetBasicAuth(beat.Username, beat.Password) + } + for k, v := range beat.Headers { + request.Header.Add(k, v) + } + if beat.HostHeader != "" { + request.Host = beat.HostHeader + } + + response, err := beat.client.Do(request) + if err != nil { + return err + } + + defer response.Body.Close() + + return json.NewDecoder(response.Body).Decode(value) +} + +func (beat *Beat) Gather(accumulator telegraf.Accumulator) error { + beatStats := &Stats{} + beatInfo := &Info{} + + infoURL, err := url.Parse(beat.URL + suffixInfo) + if err != nil { + return err + } + statsURL, err := url.Parse(beat.URL + suffixStats) + if err != nil { + return err + } + + err = beat.gatherJSONData(infoURL.String(), beatInfo) + if err != nil { + return err + } + tags := 
map[string]string{ + "beat_beat": beatInfo.Beat, + "beat_id": beatInfo.UUID, + "beat_name": beatInfo.Name, + "beat_host": beatInfo.Hostname, + "beat_version": beatInfo.Version, + } + + err = beat.gatherJSONData(statsURL.String(), beatStats) + if err != nil { + return err + } + + for _, name := range beat.Includes { + var stats interface{} + var metric string + + switch name { + case "beat": + stats = beatStats.Beat + metric = "beat" + case "filebeat": + stats = beatStats.FileBeat + metric = "beat_filebeat" + case "system": + stats = beatStats.System + metric = "beat_system" + case "libbeat": + stats = beatStats.Libbeat + metric = "beat_libbeat" + default: + return fmt.Errorf("unknown stats-type %q", name) + } + flattener := jsonparser.JSONFlattener{} + err := flattener.FullFlattenJSON("", stats, true, true) + if err != nil { + return err + } + accumulator.AddFields(metric, flattener.Fields, tags) + } + + return nil +} + +func init() { + inputs.Add("beat", func() telegraf.Input { + return NewBeat() + }) +} diff --git a/plugins/inputs/beat/beat6_info.json b/plugins/inputs/beat/beat6_info.json new file mode 100644 index 0000000000000..3cc318c330447 --- /dev/null +++ b/plugins/inputs/beat/beat6_info.json @@ -0,0 +1,7 @@ +{ + "beat": "filebeat", + "hostname": "node-6", + "name": "node-6-test", + "uuid": "9c1c8697-acb4-4df0-987d-28197814f785", + "version": "6.4.2" +} diff --git a/plugins/inputs/beat/beat6_stats.json b/plugins/inputs/beat/beat6_stats.json new file mode 100644 index 0000000000000..f34b9d1f06d1e --- /dev/null +++ b/plugins/inputs/beat/beat6_stats.json @@ -0,0 +1,137 @@ +{ + "beat": { + "cpu": { + "system": { + "ticks": 626970, + "time": { + "ms": 626972 + } + }, + "total": { + "ticks": 5215010, + "time": { + "ms": 5215018 + }, + "value": 5215010 + }, + "user": { + "ticks": 4588040, + "time": { + "ms": 4588046 + } + } + }, + "info": { + "ephemeral_id": "809e3b63-4fa0-4f74-822a-8e3c08298336", + "uptime": { + "ms": 327248661 + } + }, + "memstats": { + "gc_next": 20611808, + "memory_alloc": 12692544, + "memory_total": 462910102088, + "rss": 80273408 + } + }, + "filebeat": { + "events": { + "active": 0, + "added": 182990, + "done": 182990 + }, + "harvester": { + "closed": 2222, + "open_files": 4, + "running": 4, + "skipped": 0, + "started": 2226 + }, + "input": { + "log": { + "files": { + "renamed": 0, + "truncated": 0 + } + } + } + }, + "libbeat": { + "config": { + "module": { + "running": 0, + "starts": 0, + "stops": 0 + }, + "reloads": 0 + }, + "output": { + "events": { + "acked": 172067, + "active": 0, + "batches": 1490, + "dropped": 0, + "duplicates": 0, + "failed": 0, + "total": 172067 + }, + "read": { + "bytes": 0, + "errors": 0 + }, + "type": "kafka", + "write": { + "bytes": 0, + "errors": 0 + } + }, + "outputs": { + "kafka": { + "bytes_read": 1048670, + "bytes_write": 43136887 + } + }, + "pipeline": { + "clients": 1, + "events": { + "active": 0, + "dropped": 0, + "failed": 0, + "filtered": 10923, + "published": 172067, + "retry": 14, + "total": 182990 + }, + "queue": { + "acked": 172067 + } + } + }, + "registrar": { + "states": { + "cleanup": 3446, + "current": 16409, + "update": 182990 + }, + "writes": { + "fail": 0, + "success": 11718, + "total": 11718 + } + }, + "system": { + "cpu": { + "cores": 32 + }, + "load": { + "1": 32.49, + "15": 41.9, + "5": 40.16, + "norm": { + "1": 1.0153, + "15": 1.3094, + "5": 1.255 + } + } + } +} diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go new file mode 100644 index 0000000000000..433e8fcd61337 --- /dev/null 
+++ b/plugins/inputs/beat/beat_test.go @@ -0,0 +1,203 @@ +package beat + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func Test_BeatStats(t *testing.T) { + var beat6StatsAccumulator testutil.Accumulator + var beatTest = NewBeat() + // System stats are disabled by default + beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} + require.NoError(t, beatTest.Init()) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { + var jsonFilePath string + + switch request.URL.Path { + case suffixInfo: + jsonFilePath = "beat6_info.json" + case suffixStats: + jsonFilePath = "beat6_stats.json" + default: + require.FailNow(t, "cannot handle request") + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + _, err = w.Write(data) + require.NoError(t, err, "could not write data") + })) + requestURL, err := url.Parse(beatTest.URL) + require.NoErrorf(t, err, "can't parse URL %s", beatTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(t, err, "can't listen for %s: %v", requestURL, err) + + fakeServer.Start() + defer fakeServer.Close() + + require.NoError(t, beatTest.Gather(&beat6StatsAccumulator)) + + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat", + map[string]interface{}{ + "cpu_system_ticks": float64(626970), + "cpu_system_time_ms": float64(626972), + "cpu_total_ticks": float64(5215010), + "cpu_total_time_ms": float64(5215018), + "cpu_total_value": float64(5215010), + "cpu_user_ticks": float64(4588040), + "cpu_user_time_ms": float64(4588046), + "info_uptime_ms": float64(327248661), + "info_ephemeral_id": "809e3b63-4fa0-4f74-822a-8e3c08298336", + "memstats_gc_next": float64(20611808), + "memstats_memory_alloc": float64(12692544), + "memstats_memory_total": float64(462910102088), + "memstats_rss": float64(80273408), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat_filebeat", + map[string]interface{}{ + "events_active": float64(0), + "events_added": float64(182990), + "events_done": float64(182990), + "harvester_closed": float64(2222), + "harvester_open_files": float64(4), + "harvester_running": float64(4), + "harvester_skipped": float64(0), + "harvester_started": float64(2226), + "input_log_files_renamed": float64(0), + "input_log_files_truncated": float64(0), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat_libbeat", + map[string]interface{}{ + "config_module_running": float64(0), + "config_module_starts": float64(0), + "config_module_stops": float64(0), + "config_reloads": float64(0), + "output_type": "kafka", + "output_events_acked": float64(172067), + "output_events_active": float64(0), + "output_events_batches": float64(1490), + "output_events_dropped": float64(0), + "output_events_duplicates": float64(0), + 
"output_events_failed": float64(0), + "output_events_total": float64(172067), + "output_read_bytes": float64(0), + "output_read_errors": float64(0), + "output_write_bytes": float64(0), + "output_write_errors": float64(0), + "outputs_kafka_bytes_read": float64(1048670), + "outputs_kafka_bytes_write": float64(43136887), + "pipeline_clients": float64(1), + "pipeline_events_active": float64(0), + "pipeline_events_dropped": float64(0), + "pipeline_events_failed": float64(0), + "pipeline_events_filtered": float64(10923), + "pipeline_events_published": float64(172067), + "pipeline_events_retry": float64(14), + "pipeline_events_total": float64(182990), + "pipeline_queue_acked": float64(172067), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat_system", + map[string]interface{}{ + "cpu_cores": float64(32), + "load_1": float64(32.49), + "load_15": float64(41.9), + "load_5": float64(40.16), + "load_norm_1": float64(1.0153), + "load_norm_15": float64(1.3094), + "load_norm_5": float64(1.255), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) +} + +func Test_BeatRequest(t *testing.T) { + var beat6StatsAccumulator testutil.Accumulator + beatTest := NewBeat() + // System stats are disabled by default + beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} + require.NoError(t, beatTest.Init()) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { + var jsonFilePath string + + switch request.URL.Path { + case suffixInfo: + jsonFilePath = "beat6_info.json" + case suffixStats: + jsonFilePath = "beat6_stats.json" + default: + require.FailNow(t, "cannot handle request") + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + require.Equal(t, request.Host, "beat.test.local") + require.Equal(t, request.Method, "POST") + require.Equal(t, request.Header.Get("Authorization"), "Basic YWRtaW46UFdE") + require.Equal(t, request.Header.Get("X-Test"), "test-value") + + _, err = w.Write(data) + require.NoError(t, err, "could not write data") + })) + + requestURL, err := url.Parse(beatTest.URL) + require.NoErrorf(t, err, "can't parse URL %s", beatTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(t, err, "can't listen for %s: %v", requestURL, err) + fakeServer.Start() + defer fakeServer.Close() + + beatTest.Headers["X-Test"] = "test-value" + beatTest.HostHeader = "beat.test.local" + beatTest.Method = "POST" + beatTest.Username = "admin" + beatTest.Password = "PWD" + + require.NoError(t, beatTest.Gather(&beat6StatsAccumulator)) +} diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md index e3bcf6a75b252..d67a02020f527 100644 --- a/plugins/inputs/bind/README.md +++ b/plugins/inputs/bind/README.md @@ -20,6 +20,7 @@ not enable support for JSON statistics in their BIND packages. trailing slash in the URL. Default is "http://localhost:8053/xml/v3". - **gather_memory_contexts** bool: Report per-context memory statistics. 
- **gather_views** bool: Report per-view query statistics. +- **timeout** duration: Timeout for HTTP requests made to the BIND server (example: "4s"). The following table summarizes the URL formats which should be used, depending on your BIND version and configured statistics channel. diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go index 967c9031a2634..dd7b3d128c9f0 100644 --- a/plugins/inputs/bind/bind.go +++ b/plugins/inputs/bind/bind.go @@ -8,6 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -15,6 +16,9 @@ type Bind struct { Urls []string GatherMemoryContexts bool GatherViews bool + Timeout config.Duration `toml:"timeout"` + + client http.Client } var sampleConfig = ` @@ -23,11 +27,10 @@ var sampleConfig = ` # urls = ["http://localhost:8053/xml/v3"] # gather_memory_contexts = false # gather_views = false -` -var client = &http.Client{ - Timeout: time.Duration(4 * time.Second), -} + ## Timeout for HTTP requests made to the BIND server + # timeout = "4s" +` func (b *Bind) Description() string { return "Read BIND nameserver XML statistics" } @@ -37,6 +40,14 @@ func (b *Bind) SampleConfig() string { return sampleConfig } +func (b *Bind) Init() error { + b.client = http.Client{ + Timeout: time.Duration(b.Timeout), + } + + return nil +} + func (b *Bind) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup @@ -47,14 +58,14 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { for _, u := range b.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(b.gatherUrl(addr, acc)) + acc.AddError(b.gatherURL(addr, acc)) }(addr) } @@ -62,7 +73,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { return nil } -func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (b *Bind) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { switch addr.Path { case "": // BIND 9.6 - 9.8 @@ -77,7 +88,7 @@ func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { // BIND 9.9+ return b.readStatsXMLv3(addr, acc) default: - return fmt.Errorf("URL %s is ambiguous. 
Please check plugin documentation for supported URL formats.", + return fmt.Errorf("provided URL %s is ambiguous, please check plugin documentation for supported URL formats", addr) } } diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index 6ed953b691dd3..f7849e1735255 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -5,6 +5,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -20,6 +21,9 @@ func TestBindJsonStats(t *testing.T) { Urls: []string{ts.URL + "/json/v1"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator @@ -190,6 +194,9 @@ func TestBindXmlStatsV2(t *testing.T) { Urls: []string{ts.URL + "/xml/v2"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator @@ -392,6 +399,9 @@ func TestBindXmlStatsV3(t *testing.T) { Urls: []string{ts.URL + "/xml/v3"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator @@ -613,5 +623,5 @@ func TestBindUnparseableURL(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.Contains(t, err.Error(), "Unable to parse address") + assert.Contains(t, err.Error(), "unable to parse address") } diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 87b6065e2eb1c..61307683aac35 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -31,7 +31,7 @@ type jsonMemory struct { ContextSize int64 Lost int64 Contexts []struct { - Id string + ID string Name string Total int64 InUse int64 @@ -58,12 +58,14 @@ func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stat tags[k] = v } - grouper.Add("bind_counter", tags, ts, name, value) + if err := grouper.Add("bind_counter", tags, ts, name, value); err != nil { + acc.AddError(fmt.Errorf("adding field %q to group failed: %v", name, err)) + } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -113,7 +115,7 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Memory.Contexts { - tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port} + tags := map[string]string{"url": urlTag, "id": c.ID, "name": c.Name, "source": host, "port": port} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) @@ -133,15 +135,17 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st "type": cntrType, } - grouper.Add("bind_counter", tags, ts, cntrName, value) + if err := grouper.Add("bind_counter", tags, ts, cntrName, value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -153,21 +157,29 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full jsonStats struct by 
parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeUrl := addr.String() + suffix + err := func() error { + scrapeURL := addr.String() + suffix - resp, err := client.Get(scrapeUrl) - if err != nil { - return err - } + resp, err := b.client.Get(scrapeURL) + if err != nil { + return err + } - defer resp.Body.Close() + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) + } + + if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("unable to decode JSON blob: %s", err) + } - if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode JSON blob: %s", err) + return nil + }() + + if err != nil { + return err } } diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index 5e17851fb671c..5a0092c5af7cc 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -42,7 +42,7 @@ type v2Statistics struct { Memory struct { Contexts []struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater - Id string `xml:"id"` + ID string `xml:"id"` Name string `xml:"name"` Total int64 `xml:"total"` InUse int64 `xml:"inuse"` @@ -75,12 +75,14 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta tags[k] = v } - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding field %q to group failed: %v", c.Name, err)) + } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -89,7 +91,7 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { var stats v2Root - resp, err := client.Get(addr.String()) + resp, err := b.client.Get(addr.String()) if err != nil { return err } @@ -101,7 +103,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { } if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode XML document: %s", err) + return fmt.Errorf("unable to decode XML document: %s", err) } tags := map[string]string{"url": addr.Host} @@ -142,7 +144,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Statistics.Memory.Contexts { - tags := map[string]string{"url": addr.Host, "id": c.Id, "name": c.Name, "source": host, "port": port} + tags := map[string]string{"url": addr.Host, "id": c.ID, "name": c.Name, "source": host, "port": port} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index 89e4ea0b8fcb6..ef303f4bf052c 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -25,7 +25,7 @@ type v3Stats struct { type v3Memory struct { Contexts []struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater - Id string `xml:"id"` + ID 
string `xml:"id"` Name string `xml:"name"` Total int64 `xml:"total"` InUse int64 `xml:"inuse"` @@ -81,7 +81,9 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s tags := map[string]string{"url": hostPort, "source": host, "port": port, "type": cg.Type} - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } @@ -98,7 +100,7 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Memory.Contexts { - tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.Id, "name": c.Name} + tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.ID, "name": c.Name} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) @@ -118,15 +120,17 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s "type": cg.Type, } - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -138,21 +142,29 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full v3Stats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeUrl := addr.String() + suffix + err := func() error { + scrapeURL := addr.String() + suffix - resp, err := client.Get(scrapeUrl) - if err != nil { - return err - } + resp, err := b.client.Get(scrapeURL) + if err != nil { + return err + } - defer resp.Body.Close() + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) + } + + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("unable to decode XML document: %s", err) + } - if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode XML document: %s", err) + return nil + }() + + if err != nil { + return err } } diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index abcf72c9193ca..d905038a9d533 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -27,6 +27,7 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. - bond_slave - failures - status + - count ### Description: @@ -39,6 +40,9 @@ status failures Amount of failures for bond's slave interface. 
+ +count + Number of slaves attached to bond ``` ### Tags: @@ -79,7 +83,9 @@ Output: > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000 > bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000 > bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000 +> bond_slave,host=local,bond=bond1 count=2i 1509704525000000000 > bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000 > bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000 > bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000 +> bond_slave,bond=bond0,host=local count=2i 1509704525000000000 ``` diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index 01f6f251be776..4f30a20e3f677 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -3,7 +3,6 @@ package bond import ( "bufio" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -53,7 +52,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { } for _, bondName := range bondNames { bondAbsPath := bond.HostProc + "/net/bonding/" + bondName - file, err := ioutil.ReadFile(bondAbsPath) + file, err := os.ReadFile(bondAbsPath) if err != nil { acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err)) continue @@ -122,6 +121,7 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error { var slave string var status int + var slaveCount int scanner := bufio.NewScanner(strings.NewReader(rawFile)) for scanner.Scan() { @@ -155,12 +155,18 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf. 
"interface": slave, } acc.AddFields("bond_slave", fields, tags) + slaveCount++ } } - if err := scanner.Err(); err != nil { - return err + fields := map[string]interface{}{ + "count": slaveCount, } - return nil + tags := map[string]string{ + "bond": bondName, + } + acc.AddFields("bond_slave", fields, tags) + + return scanner.Err() } // loadPath can be used to read path firstly from config diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index c07224350352c..8dc24f4cafa45 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var sampleTest802 = ` @@ -65,13 +66,14 @@ func TestGatherBondInterface(t *testing.T) { var acc testutil.Accumulator bond := &Bond{} - bond.gatherBondInterface("bond802", sampleTest802, &acc) + require.NoError(t, bond.gatherBondInterface("bond802", sampleTest802, &acc)) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"}) - bond.gatherBondInterface("bondAB", sampleTestAB, &acc) + require.NoError(t, bond.gatherBondInterface("bondAB", sampleTestAB, &acc)) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondAB"}) } diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 501fddf16ad77..d575ea7f5eb0e 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -11,8 +11,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -73,7 +73,7 @@ type ( Servers []string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration ConcurrentConnections int APIPrefix string `toml:"api_prefix"` @@ -188,10 +188,8 @@ func (b *burrow) setDefaults() { if b.ConcurrentConnections < 1 { b.ConcurrentConnections = defaultConcurrentConnections } - if b.ResponseTimeout.Duration < time.Second { - b.ResponseTimeout = internal.Duration{ - Duration: defaultResponseTimeout, - } + if time.Duration(b.ResponseTimeout) < time.Second { + b.ResponseTimeout = config.Duration(defaultResponseTimeout) } } @@ -224,7 +222,7 @@ func (b *burrow) createClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: b.ResponseTimeout.Duration, + Timeout: time.Duration(b.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index 
cafbcb9408775..db58df6fc94e8 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -2,15 +2,15 @@ package burrow import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json @@ -27,7 +27,7 @@ func getResponseJSON(requestURI string) ([]byte, int) { } // respond with file - b, _ := ioutil.ReadFile(jsonFile) + b, _ := os.ReadFile(jsonFile) return b, code } @@ -37,6 +37,8 @@ func getHTTPServer() *httptest.Server { body, code := getResponseJSON(r.RequestURI) w.WriteHeader(code) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive w.Write(body) })) } @@ -47,7 +49,7 @@ func getHTTPServerBasicAuth() *httptest.Server { w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) username, password, authOK := r.BasicAuth() - if authOK == false { + if !authOK { http.Error(w, "Not authorized", 401) return } @@ -61,6 +63,8 @@ func getHTTPServerBasicAuth() *httptest.Server { body, code := getResponseJSON(r.RequestURI) w.WriteHeader(code) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive w.Write(body) })) } @@ -72,7 +76,7 @@ func TestBurrowTopic(t *testing.T) { plugin := &burrow{Servers: []string{s.URL}} acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ // topicA @@ -103,7 +107,7 @@ func TestBurrowPartition(t *testing.T) { Servers: []string{s.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ { @@ -151,7 +155,7 @@ func TestBurrowGroup(t *testing.T) { Servers: []string{s.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ { @@ -189,7 +193,7 @@ func TestMultipleServers(t *testing.T) { Servers: []string{s1.URL, s2.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 14, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -205,7 +209,7 @@ func TestMultipleRuns(t *testing.T) { } for i := 0; i < 4; i++ { acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 7, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -224,7 +228,7 @@ func TestBasicAuthConfig(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 7, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -241,7 +245,7 @@ func TestFilterClusters(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) // no match by cluster require.Exactly(t, 0, len(acc.Metrics)) @@ -260,7 +264,7 @@ func TestFilterGroups(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 1, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -278,7 +282,7 @@ func TestFilterTopics(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 3, len(acc.Metrics)) require.Empty(t, acc.Errors) diff --git 
a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index d89459533f55e..56c36bfe93d21 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -19,10 +19,26 @@ Cassandra plugin produces one or more measurements for each metric configured, a Given a configuration like: ```toml +# Read Cassandra metrics through Jolokia [[inputs.cassandra]] + ## DEPRECATED: The cassandra plugin has been deprecated. Please use the + ## jolokia2 plugin instead. + ## + ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 + context = "/jolokia/read" - servers = [":8778"] - metrics = ["/java.lang:type=Memory/HeapMemoryUsage"] + ## List of cassandra servers exposing jolokia read service + servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] + ## List of metrics collected on above servers + ## Each metric consists of a jmx path. + ## This will collect all heap memory usage metrics from the jvm and + ## ReadLatency metrics for all keyspaces and tables. + ## "type=Table" in the query works with Cassandra3.0. Older versions might + ## need to use "type=ColumnFamily" + metrics = [ + "/java.lang:type=Memory/HeapMemoryUsage", + "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" + ] ``` The collected metrics will be: diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 6f6f86e32f592..d1c23caadc68a 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -4,8 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" - "log" + "io" "net/http" "net/url" "strings" @@ -28,9 +27,10 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error type Cassandra struct { jClient JolokiaClient - Context string - Servers []string - Metrics []string + Context string `toml:"context"` + Servers []string `toml:"servers"` + Metrics []string `toml:"metrics"` + Log telegraf.Logger `toml:"-"` } type javaMetric struct { @@ -49,13 +49,11 @@ type jmxMetric interface { addTagsFields(out map[string]interface{}) } -func newJavaMetric(host string, metric string, - acc telegraf.Accumulator) *javaMetric { +func newJavaMetric(acc telegraf.Accumulator, host string, metric string) *javaMetric { return &javaMetric{host: host, metric: metric, acc: acc} } -func newCassandraMetric(host string, metric string, - acc telegraf.Accumulator) *cassandraMetric { +func newCassandraMetric(acc telegraf.Accumulator, host string, metric string) *cassandraMetric { return &cassandraMetric{host: host, metric: metric, acc: acc} } @@ -72,13 +70,15 @@ func addValuesAsFields(values map[string]interface{}, fields map[string]interfac func parseJmxMetricRequest(mbean string) map[string]string { tokens := make(map[string]string) classAndPairs := strings.Split(mbean, ":") - if classAndPairs[0] == "org.apache.cassandra.metrics" { + switch classAndPairs[0] { + case "org.apache.cassandra.metrics": tokens["class"] = "cassandra" - } else if classAndPairs[0] == "java.lang" { + case "java.lang": tokens["class"] = "java" - } else { + default: return tokens } + pairs := strings.Split(classAndPairs[1], ",") for _, pair := range pairs { p := strings.Split(pair, "=") @@ -125,14 +125,11 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) { } j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) } else { - j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - j.metric, out)) + 
j.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", j.metric, out)) } } -func addCassandraMetric(mbean string, c cassandraMetric, - values map[string]interface{}) { - +func addCassandraMetric(mbean string, c cassandraMetric, values map[string]interface{}) { tags := make(map[string]string) fields := make(map[string]interface{}) tokens := parseJmxMetricRequest(mbean) @@ -140,11 +137,9 @@ func addCassandraMetric(mbean string, c cassandraMetric, tags["cassandra_host"] = c.host addValuesAsFields(values, fields, tags["mname"]) c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) - } func (c cassandraMetric) addTagsFields(out map[string]interface{}) { - r := out["request"] tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string)) @@ -152,28 +147,25 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { // maps in the json response if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" || tokens["scope"] == "*") { - if valuesMap, ok := out["value"]; ok { - for k, v := range valuesMap.(map[string]interface{}) { - addCassandraMetric(k, c, v.(map[string]interface{})) - } - } else { - c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - c.metric, out)) + valuesMap, ok := out["value"] + if !ok { + c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } + for k, v := range valuesMap.(map[string]interface{}) { + addCassandraMetric(k, c, v.(map[string]interface{})) + } } else { - if values, ok := out["value"]; ok { - addCassandraMetric(r.(map[string]interface{})["mbean"].(string), - c, values.(map[string]interface{})) - } else { - c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - c.metric, out)) + values, ok := out["value"] + if !ok { + c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } + addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{})) } } -func (j *Cassandra) SampleConfig() string { +func (c *Cassandra) SampleConfig() string { return ` ## DEPRECATED: The cassandra plugin has been deprecated. Please use the ## jolokia2 plugin instead. 
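For configurations being migrated off the deprecated plugin, a minimal jolokia2 equivalent of the sample above could look like the sketch below. It is an illustration only: the agent URL, metric names, and tag keys are assumptions, not part of this changeset.

```toml
# Hypothetical jolokia2_agent configuration approximating the deprecated
# cassandra plugin's sample; the URL, metric names, and tag keys are assumptions.
[[inputs.jolokia2_agent]]
  # Jolokia agent endpoints on the Cassandra nodes (illustrative).
  urls = ["http://localhost:8778/jolokia"]

  # JVM heap usage, roughly "/java.lang:type=Memory/HeapMemoryUsage" above.
  [[inputs.jolokia2_agent.metric]]
    name  = "java_Memory"
    mbean = "java.lang:type=Memory"
    paths = ["HeapMemoryUsage"]

  # Per-table read latency, roughly the Table ReadLatency metric above.
  [[inputs.jolokia2_agent.metric]]
    name     = "cassandra_Table_ReadLatency"
    mbean    = "org.apache.cassandra.metrics:keyspace=*,name=ReadLatency,scope=*,type=Table"
    tag_keys = ["keyspace", "scope"]
```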
@@ -196,18 +188,18 @@ func (j *Cassandra) SampleConfig() string { ` } -func (j *Cassandra) Description() string { +func (c *Cassandra) Description() string { return "Read Cassandra metrics through Jolokia" } -func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { +func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) { // Create + send request - req, err := http.NewRequest("GET", requestUrl.String(), nil) + req, err := http.NewRequest("GET", requestURL.String(), nil) if err != nil { return nil, err } - resp, err := j.jClient.MakeRequest(req) + resp, err := c.jClient.MakeRequest(req) if err != nil { return nil, err } @@ -215,8 +207,8 @@ func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", - requestUrl, + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, @@ -225,15 +217,15 @@ func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } // Unmarshal json var jsonOut map[string]interface{} - if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { - return nil, errors.New("Error decoding JSON response") + if err = json.Unmarshal(body, &jsonOut); err != nil { + return nil, errors.New("error decoding JSON response") } return jsonOut, nil @@ -263,8 +255,8 @@ func parseServerTokens(server string) map[string]string { return serverTokens } -func (c *Cassandra) Start(acc telegraf.Accumulator) error { - log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " + +func (c *Cassandra) Start(_ telegraf.Accumulator) error { + c.Log.Warn("DEPRECATED: The cassandra plugin has been deprecated. " + "Please use the jolokia2 plugin instead. " + "https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2") return nil @@ -284,36 +276,35 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { var m jmxMetric if strings.HasPrefix(metric, "/java.lang:") { - m = newJavaMetric(serverTokens["host"], metric, acc) + m = newJavaMetric(acc, serverTokens["host"], metric) } else if strings.HasPrefix(metric, "/org.apache.cassandra.metrics:") { - m = newCassandraMetric(serverTokens["host"], metric, acc) + m = newCassandraMetric(acc, serverTokens["host"], metric) } else { // unsupported metric type - acc.AddError(fmt.Errorf("E! 
Unsupported Cassandra metric [%s], skipping", - metric)) + acc.AddError(fmt.Errorf("unsupported Cassandra metric [%s], skipping", metric)) continue } // Prepare URL - requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" + + requestURL, err := url.Parse("http://" + serverTokens["host"] + ":" + serverTokens["port"] + context + metric) if err != nil { acc.AddError(err) continue } if serverTokens["user"] != "" && serverTokens["passwd"] != "" { - requestUrl.User = url.UserPassword(serverTokens["user"], + requestURL.User = url.UserPassword(serverTokens["user"], serverTokens["passwd"]) } - out, err := c.getAttr(requestUrl) + out, err := c.getAttr(requestURL) if err != nil { acc.AddError(err) continue } if out["status"] != 200.0 { - acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl)) + acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestURL)) continue } m.addTagsFields(out) diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 43a9a0c1eb105..f167f50e7187f 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -2,7 +2,7 @@ package cassandra import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -77,19 +77,6 @@ const validCassandraNestedMultiValueJSON = ` } }` -const validSingleValueJSON = ` -{ - "request":{ - "path":"used", - "mbean":"java.lang:type=Memory", - "attribute":"HeapMemoryUsage", - "type":"read" - }, - "value":209274376, - "timestamp":1446129256, - "status":200 -}` - const validJavaMultiTypeJSON = ` { "request":{ @@ -104,8 +91,6 @@ const validJavaMultiTypeJSON = ` const invalidJSON = "I don't think this is JSON" -const empty = "" - var Servers = []string{"10.10.10.10:8778"} var AuthServers = []string{"user:passwd@10.10.10.10:8778"} var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"} @@ -121,10 +106,10 @@ type jolokiaClientStub struct { statusCode int } -func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) { +func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -198,9 +183,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) { // Test that the proper values are ignored or collected func TestHttp404(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, - []string{HeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric}) var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 171b64760654f..5d5afadc19fad 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -2,7 +2,7 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. -Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](http://docs.ceph.com/docs/mimic/mgr/telegraf/) +Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. 
[Learn more in their docs](https://docs.ceph.com/en/latest/mgr/telegraf/)
*Admin Socket Stats*
@@ -45,7 +45,7 @@ the cluster. The currently supported commands are:
### Configuration:
```toml
-# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
+# Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
[[inputs.ceph]]
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go
index c875de8dfaeba..efd61d56322a7 100644
--- a/plugins/inputs/ceph/ceph.go
+++ b/plugins/inputs/ceph/ceph.go
@@ -4,8 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
- "log"
+ "os"
"os/exec"
"path/filepath"
"strings"
@@ -28,17 +27,19 @@ const (
)
type Ceph struct {
- CephBinary string
- OsdPrefix string
- MonPrefix string
- MdsPrefix string
- RgwPrefix string
- SocketDir string
- SocketSuffix string
- CephUser string
- CephConfig string
- GatherAdminSocketStats bool
- GatherClusterStats bool
+ CephBinary string `toml:"ceph_binary"`
+ OsdPrefix string `toml:"osd_prefix"`
+ MonPrefix string `toml:"mon_prefix"`
+ MdsPrefix string `toml:"mds_prefix"`
+ RgwPrefix string `toml:"rgw_prefix"`
+ SocketDir string `toml:"socket_dir"`
+ SocketSuffix string `toml:"socket_suffix"`
+ CephUser string `toml:"ceph_user"`
+ CephConfig string `toml:"ceph_config"`
+ GatherAdminSocketStats bool `toml:"gather_admin_socket_stats"`
+ GatherClusterStats bool `toml:"gather_cluster_stats"`
+
+ Log telegraf.Logger `toml:"-"`
}
func (c *Ceph) Description() string {
@@ -67,7 +68,14 @@ var sampleConfig = `
## suffix used to identify socket files
socket_suffix = "asok"
- ## Ceph user to authenticate as
+ ## Ceph user to authenticate as; Ceph will search for the corresponding keyring,
+ ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
+ ## client section of ceph.conf:
+ ##
+ ## [client.telegraf]
+ ## keyring = /etc/ceph/client.telegraf.keyring
+ ##
+ ## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin" ## Ceph configuration to use to locate the cluster @@ -76,7 +84,8 @@ var sampleConfig = ` ## Whether to gather statistics via the admin socket gather_admin_socket_stats = true - ## Whether to gather statistics via ceph commands + ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config + ## to be specified gather_cluster_stats = false ` @@ -112,15 +121,15 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err)) continue } - data, err := parseDump(dump) + data, err := c.parseDump(dump) if err != nil { acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err)) continue } for tag, metrics := range data { acc.AddFields(measurement, - map[string]interface{}(metrics), - map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag}) + metrics, + map[string]string{"type": s.sockType, "id": s.sockID, "collection": tag}) } } return nil @@ -138,7 +147,7 @@ func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error { // For each job, execute against the cluster, parse and accumulate the data points for _, job := range jobs { - output, err := c.exec(job.command) + output, err := c.execute(job.command) if err != nil { return fmt.Errorf("error executing command: %v", err) } @@ -171,15 +180,17 @@ func init() { var perfDump = func(binary string, socket *socket) (string, error) { cmdArgs := []string{"--admin-daemon", socket.socket} - if socket.sockType == typeOsd { + + switch socket.sockType { + case typeOsd: cmdArgs = append(cmdArgs, "perf", "dump") - } else if socket.sockType == typeMon { + case typeMon: cmdArgs = append(cmdArgs, "perfcounters_dump") - } else if socket.sockType == typeMds { + case typeMds: cmdArgs = append(cmdArgs, "perf", "dump") - } else if socket.sockType == typeRgw { + case typeRgw: cmdArgs = append(cmdArgs, "perf", "dump") - } else { + default: return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType) } @@ -195,7 +206,7 @@ var perfDump = func(binary string, socket *socket) (string, error) { } var findSockets = func(c *Ceph) ([]*socket, error) { - listing, err := ioutil.ReadDir(c.SocketDir) + listing, err := os.ReadDir(c.SocketDir) if err != nil { return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err) } @@ -211,28 +222,25 @@ var findSockets = func(c *Ceph) ([]*socket, error) { if strings.HasPrefix(f, c.OsdPrefix) { sockType = typeOsd sockPrefix = osdPrefix - } if strings.HasPrefix(f, c.MdsPrefix) { sockType = typeMds sockPrefix = mdsPrefix - } if strings.HasPrefix(f, c.RgwPrefix) { sockType = typeRgw sockPrefix = rgwPrefix - } if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw { path := filepath.Join(c.SocketDir, f) - sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path}) + sockets = append(sockets, &socket{parseSockID(f, sockPrefix, c.SocketSuffix), sockType, path}) } } return sockets, nil } -func parseSockId(fname, prefix, suffix string) string { +func parseSockID(fname, prefix, suffix string) string { s := fname s = strings.TrimPrefix(s, prefix) s = strings.TrimSuffix(s, suffix) @@ -241,7 +249,7 @@ func parseSockId(fname, prefix, suffix string) string { } type socket struct { - sockId string + sockID string sockType string socket string } @@ -256,8 +264,10 @@ func (m *metric) name() string { buf := bytes.Buffer{} for i := len(m.pathStack) - 1; i >= 0; i-- { 
if buf.Len() > 0 { + //nolint:errcheck,revive // should never return an error buf.WriteString(".") } + //nolint:errcheck,revive // should never return an error buf.WriteString(m.pathStack[i]) } return buf.String() @@ -269,23 +279,23 @@ type taggedMetricMap map[string]metricMap // Parses a raw JSON string into a taggedMetricMap // Delegates the actual parsing to newTaggedMetricMap(..) -func parseDump(dump string) (taggedMetricMap, error) { +func (c *Ceph) parseDump(dump string) (taggedMetricMap, error) { data := make(map[string]interface{}) err := json.Unmarshal([]byte(dump), &data) if err != nil { return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err) } - return newTaggedMetricMap(data), nil + return c.newTaggedMetricMap(data), nil } // Builds a TaggedMetricMap out of a generic string map. // The top-level key is used as a tag and all sub-keys are flattened into metrics -func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { +func (c *Ceph) newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { tmm := make(taggedMetricMap) for tag, datapoints := range data { mm := make(metricMap) - for _, m := range flatten(datapoints) { + for _, m := range c.flatten(datapoints) { mm[m.name()] = m.value } tmm[tag] = mm @@ -297,7 +307,7 @@ func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { // Nested keys are flattened into ordered slices associated with a metric value. // The key slices are treated as stacks, and are expected to be reversed and concatenated // when passed as metrics to the accumulator. (see (*metric).name()) -func flatten(data interface{}) []*metric { +func (c *Ceph) flatten(data interface{}) []*metric { var metrics []*metric switch val := data.(type) { @@ -306,20 +316,20 @@ func flatten(data interface{}) []*metric { case map[string]interface{}: metrics = make([]*metric, 0, len(val)) for k, v := range val { - for _, m := range flatten(v) { + for _, m := range c.flatten(v) { m.pathStack = append(m.pathStack, k) metrics = append(metrics, m) } } default: - log.Printf("I! [inputs.ceph] ignoring unexpected type '%T' for value %v", val, val) + c.Log.Infof("ignoring unexpected type '%T' for value %v", val, val) } return metrics } -// exec executes the 'ceph' command with the supplied arguments, returning JSON formatted output -func (c *Ceph) exec(command string) (string, error) { +// execute executes the 'ceph' command with the supplied arguments, returning JSON formatted output +func (c *Ceph) execute(command string) (string, error) { cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"} cmdArgs = append(cmdArgs, strings.Split(command, " ")...) 
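The ceph.go hunk above turns `parseDump`, `newTaggedMetricMap`, and `flatten` into methods on `Ceph` so they can report through the plugin's `telegraf.Logger` rather than the global `log` package. For reviewers who want the parsing idea in isolation, here is a minimal, self-contained sketch of the same recursive flattening technique, using simplified stand-in types and a hypothetical, heavily trimmed `perf dump` payload (not the plugin's exact code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// metric mirrors the plugin's pathStack idea: keys are pushed while the
// recursion unwinds, so they end up stored in reverse order.
type metric struct {
	pathStack []string
	value     interface{}
}

// name reverses the stack into a dotted field name, e.g. "store_state_latency.sum".
func (m *metric) name() string {
	parts := make([]string, 0, len(m.pathStack))
	for i := len(m.pathStack) - 1; i >= 0; i-- {
		parts = append(parts, m.pathStack[i])
	}
	return strings.Join(parts, ".")
}

// flatten walks arbitrarily nested JSON, emitting one metric per numeric leaf.
func flatten(data interface{}) []*metric {
	switch val := data.(type) {
	case float64:
		return []*metric{{value: val}}
	case map[string]interface{}:
		var metrics []*metric
		for k, v := range val {
			for _, m := range flatten(v) {
				m.pathStack = append(m.pathStack, k)
				metrics = append(metrics, m)
			}
		}
		return metrics
	default:
		return nil // unexpected types are skipped; the plugin logs them instead
	}
}

func main() {
	// Hypothetical, heavily trimmed "ceph --admin-daemon ... perf dump" output.
	dump := `{"paxos": {"store_state_latency": {"avgcount": 0, "sum": 6866.54}}}`
	var data map[string]interface{}
	if err := json.Unmarshal([]byte(dump), &data); err != nil {
		panic(err)
	}
	// The top-level key becomes the "collection" tag; leaves become fields.
	for tag, section := range data {
		for _, m := range flatten(section) {
			fmt.Printf("tag=%s field=%s value=%v\n", tag, m.name(), m.value)
		}
	}
}
```

Keys are appended as the recursion unwinds, which is why `name()` reads the stack in reverse; this reproduces dotted names such as `store_state_latency.sum`, matching the fields asserted in the tests that follow.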
diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 78da3438de691..7915d6dd695f4 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -2,15 +2,15 @@ package ceph import ( "fmt" - "io/ioutil" "os" - "path" + "path/filepath" "strconv" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) const ( @@ -24,42 +24,46 @@ type expectedResult struct { } func TestParseSockId(t *testing.T) { - s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) - assert.Equal(t, s, "1") + s := parseSockID(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) + require.Equal(t, s, "1") } func TestParseMonDump(t *testing.T) { - dump, err := parseDump(monPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) - assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(monPerfDump) + require.NoError(t, err) + require.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) + require.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) } func TestParseOsdDump(t *testing.T) { - dump, err := parseDump(osdPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) - assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(osdPerfDump) + require.NoError(t, err) + require.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) + require.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } func TestParseMdsDump(t *testing.T) { - dump, err := parseDump(mdsPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) - assert.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(mdsPerfDump) + require.NoError(t, err) + require.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) + require.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) } func TestParseRgwDump(t *testing.T) { - dump, err := parseDump(rgwPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) - assert.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(rgwPerfDump) + require.NoError(t, err) + require.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) + require.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) } func TestDecodeStatus(t *testing.T) { acc := &testutil.Accumulator{} err := decodeStatus(acc, clusterStatusDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephStatusResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -69,7 +73,7 @@ func TestDecodeStatus(t *testing.T) { func TestDecodeDf(t *testing.T) { acc := &testutil.Accumulator{} err := decodeDf(acc, cephDFDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephDfResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -79,7 +83,7 @@ func TestDecodeDf(t *testing.T) { func 
TestDecodeOSDPoolStats(t *testing.T) { acc := &testutil.Accumulator{} err := decodeOsdPoolStats(acc, cephODSPoolStatsDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephOSDPoolStatsResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -104,16 +108,15 @@ func TestGather(t *testing.T) { acc := &testutil.Accumulator{} c := &Ceph{} - c.Gather(acc) - + require.NoError(t, c.Gather(acc)) } func TestFindSockets(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "socktest") - assert.NoError(t, err) + tmpdir, err := os.MkdirTemp("", "socktest") + require.NoError(t, err) defer func() { err := os.Remove(tmpdir) - assert.NoError(t, err) + require.NoError(t, err) }() c := &Ceph{ CephBinary: "foo", @@ -130,10 +133,10 @@ func TestFindSockets(t *testing.T) { } for _, st := range sockTestParams { - createTestFiles(tmpdir, st) + require.NoError(t, createTestFiles(tmpdir, st)) sockets, err := findSockets(c) - assert.NoError(t, err) + require.NoError(t, err) for i := 1; i <= st.osds; i++ { assertFoundSocket(t, tmpdir, typeOsd, i, sockets) @@ -148,7 +151,7 @@ func TestFindSockets(t *testing.T) { for i := 1; i <= st.rgws; i++ { assertFoundSocket(t, tmpdir, typeRgw, i, sockets) } - cleanupTestFiles(tmpdir, st) + require.NoError(t, cleanupTestFiles(tmpdir, st)) } } @@ -163,57 +166,64 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc } else { prefix = monPrefix } - expected := path.Join(dir, sockFile(prefix, i)) + expected := filepath.Join(dir, sockFile(prefix, i)) found := false for _, s := range sockets { - fmt.Printf("Checking %s\n", s.socket) + _, err := fmt.Printf("Checking %s\n", s.socket) + require.NoError(t, err) if s.socket == expected { found = true - assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) - assert.Equal(t, s.sockId, strconv.Itoa(i)) + require.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) + require.Equal(t, s.sockID, strconv.Itoa(i)) } } - assert.True(t, found, "Did not find socket: %s", expected) + require.True(t, found, "Did not find socket: %s", expected) } func sockFile(prefix string, i int) string { return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".") } -func createTestFiles(dir string, st *SockTest) { - writeFile := func(prefix string, i int) { +func createTestFiles(dir string, st *SockTest) error { + writeFile := func(prefix string, i int) error { f := sockFile(prefix, i) - fpath := path.Join(dir, f) - ioutil.WriteFile(fpath, []byte(""), 0777) + fpath := filepath.Join(dir, f) + return os.WriteFile(fpath, []byte(""), 0777) } - tstFileApply(st, writeFile) + return tstFileApply(st, writeFile) } -func cleanupTestFiles(dir string, st *SockTest) { - rmFile := func(prefix string, i int) { +func cleanupTestFiles(dir string, st *SockTest) error { + rmFile := func(prefix string, i int) error { f := sockFile(prefix, i) - fpath := path.Join(dir, f) - err := os.Remove(fpath) - if err != nil { - fmt.Printf("Error removing test file %s: %v\n", fpath, err) - } + fpath := filepath.Join(dir, f) + return os.Remove(fpath) } - tstFileApply(st, rmFile) + return tstFileApply(st, rmFile) } -func tstFileApply(st *SockTest, fn func(prefix string, i int)) { +func tstFileApply(st *SockTest, fn func(string, int) error) error { for i := 1; i <= st.osds; i++ { - fn(osdPrefix, i) + if err := fn(osdPrefix, i); err != nil { + return err + } } for i := 1; i <= st.mons; i++ { - fn(monPrefix, i) + if err := fn(monPrefix, i); err != nil { + return err + } } for i := 1; i <= 
st.mdss; i++ { - fn(mdsPrefix, i) + if err := fn(mdsPrefix, i); err != nil { + return err + } } for i := 1; i <= st.rgws; i++ { - fn(rgwPrefix, i) + if err := fn(rgwPrefix, i); err != nil { + return err + } } + return nil } type SockTest struct { diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index 6982517bc5879..7d0eede0f7f10 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -27,11 +27,11 @@ VAL1\n VAL0 VAL1 ...\n ``` -* New line separated key-space-value's +* Space separated keys and value, separated by new line ``` -KEY0 VAL0\n -KEY1 VAL1\n +KEY0 ... VAL0\n +KEY1 ... VAL1\n ``` @@ -44,12 +44,19 @@ All measurements have the following tags: ### Configuration: ```toml +# Read specific statistics per cgroup # [[inputs.cgroup]] + ## Directories in which to look for files, globs are supported. + ## Consider restricting paths to the set of cgroups you really + ## want to monitor if you have a large number of cgroups, to avoid + ## any cardinality issues. # paths = [ - # "/sys/fs/cgroup/memory", # root cgroup - # "/sys/fs/cgroup/memory/child1", # container cgroup - # "/sys/fs/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself + # "/sys/fs/cgroup/memory", + # "/sys/fs/cgroup/memory/child1", + # "/sys/fs/cgroup/memory/child2/*", # ] + ## cgroup stat fields, as file names, globs are supported. + ## these file names are appended to each path from above. # files = ["memory.*usage*", "memory.limit_in_bytes"] ``` diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index bb38525b7a8f5..b892f528c234f 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -1,15 +1,16 @@ +//go:build linux // +build linux package cgroup import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" "regexp" "strconv" + "strings" "github.com/influxdata/telegraf" ) @@ -25,7 +26,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error { acc.AddError(dir.err) continue } - if err := g.gatherDir(dir.path, acc); err != nil { + if err := g.gatherDir(acc, dir.path); err != nil { acc.AddError(err) } } @@ -33,7 +34,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error { return nil } -func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error { +func (g *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error { fields := make(map[string]interface{}) list := make(chan pathInfo) @@ -44,7 +45,7 @@ func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error { return file.err } - raw, err := ioutil.ReadFile(file.path) + raw, err := os.ReadFile(file.path) if err != nil { return err } @@ -72,8 +73,8 @@ type pathInfo struct { err error } -func isDir(path string) (bool, error) { - result, err := os.Stat(path) +func isDir(pathToCheck string) (bool, error) { + result, err := os.Stat(pathToCheck) if err != nil { return false, err } @@ -168,7 +169,7 @@ type fileFormat struct { parser func(measurement string, fields map[string]interface{}, b []byte) } -const keyPattern = "[[:alpha:]_]+" +const keyPattern = "[[:alnum:]:_]+" const valuePattern = "[\\d-]+" var fileFormats = [...]fileFormat{ @@ -208,17 +209,18 @@ var fileFormats = [...]fileFormat{ } }, }, - // KEY0 VAL0\n - // KEY1 VAL1\n + // KEY0 ... VAL0\n + // KEY1 ... VAL1\n // ... 
{ - name: "New line separated key-space-value's", - pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$", + name: "Space separated keys and value, separated by new line", + pattern: "^((" + keyPattern + " )+" + valuePattern + "\n)+$", parser: func(measurement string, fields map[string]interface{}, b []byte) { - re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n") + re := regexp.MustCompile("((?:" + keyPattern + " ?)+) (" + valuePattern + ")\n") matches := re.FindAllStringSubmatch(string(b), -1) for _, v := range matches { - fields[measurement+"."+v[1]] = numberOrString(v[2]) + k := strings.ReplaceAll(v[1], " ", ".") + fields[measurement+"."+k] = numberOrString(v[2]) } }, }, diff --git a/plugins/inputs/cgroup/cgroup_notlinux.go b/plugins/inputs/cgroup/cgroup_notlinux.go index 2bc227410a6e2..1c9c08ec41ac5 100644 --- a/plugins/inputs/cgroup/cgroup_notlinux.go +++ b/plugins/inputs/cgroup/cgroup_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroup diff --git a/plugins/inputs/cgroup/cgroup_test.go b/plugins/inputs/cgroup/cgroup_test.go index b3094baef31ae..ba74247eeb1f3 100644 --- a/plugins/inputs/cgroup/cgroup_test.go +++ b/plugins/inputs/cgroup/cgroup_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroup @@ -180,3 +181,155 @@ func TestCgroupStatistics_6(t *testing.T) { } acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } + +// ====================================================================== + +var cg7 = &CGroup{ + Paths: []string{"testdata/blkio"}, + Files: []string{"blkio.throttle.io_serviced"}, +} + +func TestCgroupStatistics_7(t *testing.T) { + var acc testutil.Accumulator + + err := acc.GatherError(cg7.Gather) + require.NoError(t, err) + + tags := map[string]string{ + "path": "testdata/blkio", + } + fields := map[string]interface{}{ + "blkio.throttle.io_serviced.11:0.Read": int64(0), + "blkio.throttle.io_serviced.11:0.Write": int64(0), + "blkio.throttle.io_serviced.11:0.Sync": int64(0), + "blkio.throttle.io_serviced.11:0.Async": int64(0), + "blkio.throttle.io_serviced.11:0.Total": int64(0), + "blkio.throttle.io_serviced.8:0.Read": int64(49134), + "blkio.throttle.io_serviced.8:0.Write": int64(216703), + "blkio.throttle.io_serviced.8:0.Sync": int64(177906), + "blkio.throttle.io_serviced.8:0.Async": int64(87931), + "blkio.throttle.io_serviced.8:0.Total": int64(265837), + "blkio.throttle.io_serviced.7:7.Read": int64(0), + "blkio.throttle.io_serviced.7:7.Write": int64(0), + "blkio.throttle.io_serviced.7:7.Sync": int64(0), + "blkio.throttle.io_serviced.7:7.Async": int64(0), + "blkio.throttle.io_serviced.7:7.Total": int64(0), + "blkio.throttle.io_serviced.7:6.Read": int64(0), + "blkio.throttle.io_serviced.7:6.Write": int64(0), + "blkio.throttle.io_serviced.7:6.Sync": int64(0), + "blkio.throttle.io_serviced.7:6.Async": int64(0), + "blkio.throttle.io_serviced.7:6.Total": int64(0), + "blkio.throttle.io_serviced.7:5.Read": int64(0), + "blkio.throttle.io_serviced.7:5.Write": int64(0), + "blkio.throttle.io_serviced.7:5.Sync": int64(0), + "blkio.throttle.io_serviced.7:5.Async": int64(0), + "blkio.throttle.io_serviced.7:5.Total": int64(0), + "blkio.throttle.io_serviced.7:4.Read": int64(0), + "blkio.throttle.io_serviced.7:4.Write": int64(0), + "blkio.throttle.io_serviced.7:4.Sync": int64(0), + "blkio.throttle.io_serviced.7:4.Async": int64(0), + "blkio.throttle.io_serviced.7:4.Total": int64(0), + "blkio.throttle.io_serviced.7:3.Read": int64(0), + "blkio.throttle.io_serviced.7:3.Write": int64(0), + 
"blkio.throttle.io_serviced.7:3.Sync": int64(0), + "blkio.throttle.io_serviced.7:3.Async": int64(0), + "blkio.throttle.io_serviced.7:3.Total": int64(0), + "blkio.throttle.io_serviced.7:2.Read": int64(0), + "blkio.throttle.io_serviced.7:2.Write": int64(0), + "blkio.throttle.io_serviced.7:2.Sync": int64(0), + "blkio.throttle.io_serviced.7:2.Async": int64(0), + "blkio.throttle.io_serviced.7:2.Total": int64(0), + "blkio.throttle.io_serviced.7:1.Read": int64(0), + "blkio.throttle.io_serviced.7:1.Write": int64(0), + "blkio.throttle.io_serviced.7:1.Sync": int64(0), + "blkio.throttle.io_serviced.7:1.Async": int64(0), + "blkio.throttle.io_serviced.7:1.Total": int64(0), + "blkio.throttle.io_serviced.7:0.Read": int64(0), + "blkio.throttle.io_serviced.7:0.Write": int64(0), + "blkio.throttle.io_serviced.7:0.Sync": int64(0), + "blkio.throttle.io_serviced.7:0.Async": int64(0), + "blkio.throttle.io_serviced.7:0.Total": int64(0), + "blkio.throttle.io_serviced.1:15.Read": int64(3), + "blkio.throttle.io_serviced.1:15.Write": int64(0), + "blkio.throttle.io_serviced.1:15.Sync": int64(0), + "blkio.throttle.io_serviced.1:15.Async": int64(3), + "blkio.throttle.io_serviced.1:15.Total": int64(3), + "blkio.throttle.io_serviced.1:14.Read": int64(3), + "blkio.throttle.io_serviced.1:14.Write": int64(0), + "blkio.throttle.io_serviced.1:14.Sync": int64(0), + "blkio.throttle.io_serviced.1:14.Async": int64(3), + "blkio.throttle.io_serviced.1:14.Total": int64(3), + "blkio.throttle.io_serviced.1:13.Read": int64(3), + "blkio.throttle.io_serviced.1:13.Write": int64(0), + "blkio.throttle.io_serviced.1:13.Sync": int64(0), + "blkio.throttle.io_serviced.1:13.Async": int64(3), + "blkio.throttle.io_serviced.1:13.Total": int64(3), + "blkio.throttle.io_serviced.1:12.Read": int64(3), + "blkio.throttle.io_serviced.1:12.Write": int64(0), + "blkio.throttle.io_serviced.1:12.Sync": int64(0), + "blkio.throttle.io_serviced.1:12.Async": int64(3), + "blkio.throttle.io_serviced.1:12.Total": int64(3), + "blkio.throttle.io_serviced.1:11.Read": int64(3), + "blkio.throttle.io_serviced.1:11.Write": int64(0), + "blkio.throttle.io_serviced.1:11.Sync": int64(0), + "blkio.throttle.io_serviced.1:11.Async": int64(3), + "blkio.throttle.io_serviced.1:11.Total": int64(3), + "blkio.throttle.io_serviced.1:10.Read": int64(3), + "blkio.throttle.io_serviced.1:10.Write": int64(0), + "blkio.throttle.io_serviced.1:10.Sync": int64(0), + "blkio.throttle.io_serviced.1:10.Async": int64(3), + "blkio.throttle.io_serviced.1:10.Total": int64(3), + "blkio.throttle.io_serviced.1:9.Read": int64(3), + "blkio.throttle.io_serviced.1:9.Write": int64(0), + "blkio.throttle.io_serviced.1:9.Sync": int64(0), + "blkio.throttle.io_serviced.1:9.Async": int64(3), + "blkio.throttle.io_serviced.1:9.Total": int64(3), + "blkio.throttle.io_serviced.1:8.Read": int64(3), + "blkio.throttle.io_serviced.1:8.Write": int64(0), + "blkio.throttle.io_serviced.1:8.Sync": int64(0), + "blkio.throttle.io_serviced.1:8.Async": int64(3), + "blkio.throttle.io_serviced.1:8.Total": int64(3), + "blkio.throttle.io_serviced.1:7.Read": int64(3), + "blkio.throttle.io_serviced.1:7.Write": int64(0), + "blkio.throttle.io_serviced.1:7.Sync": int64(0), + "blkio.throttle.io_serviced.1:7.Async": int64(3), + "blkio.throttle.io_serviced.1:7.Total": int64(3), + "blkio.throttle.io_serviced.1:6.Read": int64(3), + "blkio.throttle.io_serviced.1:6.Write": int64(0), + "blkio.throttle.io_serviced.1:6.Sync": int64(0), + "blkio.throttle.io_serviced.1:6.Async": int64(3), + "blkio.throttle.io_serviced.1:6.Total": int64(3), + 
"blkio.throttle.io_serviced.1:5.Read": int64(3), + "blkio.throttle.io_serviced.1:5.Write": int64(0), + "blkio.throttle.io_serviced.1:5.Sync": int64(0), + "blkio.throttle.io_serviced.1:5.Async": int64(3), + "blkio.throttle.io_serviced.1:5.Total": int64(3), + "blkio.throttle.io_serviced.1:4.Read": int64(3), + "blkio.throttle.io_serviced.1:4.Write": int64(0), + "blkio.throttle.io_serviced.1:4.Sync": int64(0), + "blkio.throttle.io_serviced.1:4.Async": int64(3), + "blkio.throttle.io_serviced.1:4.Total": int64(3), + "blkio.throttle.io_serviced.1:3.Read": int64(3), + "blkio.throttle.io_serviced.1:3.Write": int64(0), + "blkio.throttle.io_serviced.1:3.Sync": int64(0), + "blkio.throttle.io_serviced.1:3.Async": int64(3), + "blkio.throttle.io_serviced.1:3.Total": int64(3), + "blkio.throttle.io_serviced.1:2.Read": int64(3), + "blkio.throttle.io_serviced.1:2.Write": int64(0), + "blkio.throttle.io_serviced.1:2.Sync": int64(0), + "blkio.throttle.io_serviced.1:2.Async": int64(3), + "blkio.throttle.io_serviced.1:2.Total": int64(3), + "blkio.throttle.io_serviced.1:1.Read": int64(3), + "blkio.throttle.io_serviced.1:1.Write": int64(0), + "blkio.throttle.io_serviced.1:1.Sync": int64(0), + "blkio.throttle.io_serviced.1:1.Async": int64(3), + "blkio.throttle.io_serviced.1:1.Total": int64(3), + "blkio.throttle.io_serviced.1:0.Read": int64(3), + "blkio.throttle.io_serviced.1:0.Write": int64(0), + "blkio.throttle.io_serviced.1:0.Sync": int64(0), + "blkio.throttle.io_serviced.1:0.Async": int64(3), + "blkio.throttle.io_serviced.1:0.Total": int64(3), + "blkio.throttle.io_serviced.Total": int64(265885), + } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) +} diff --git a/plugins/inputs/chrony/chrony_test.go b/plugins/inputs/chrony/chrony_test.go index a5fd9dd028e57..01f5f458dd738 100644 --- a/plugins/inputs/chrony/chrony_test.go +++ b/plugins/inputs/chrony/chrony_test.go @@ -49,10 +49,9 @@ func TestGather(t *testing.T) { t.Fatal(err) } acc.AssertContainsTaggedFields(t, "chrony", fields, tags) - } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} @@ -66,7 +65,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. 
-func TestHelperProcess(t *testing.T) {
+func TestHelperProcess(_ *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
@@ -95,14 +94,18 @@ Leap status : Not synchronized
if cmd == "chronyc" {
if args[0] == "tracking" {
+ //nolint:errcheck,revive // test will fail anyway
fmt.Fprint(os.Stdout, lookup+mockData)
} else {
+ //nolint:errcheck,revive // test will fail anyway
fmt.Fprint(os.Stdout, noLookup+mockData)
}
} else {
+ //nolint:errcheck,revive // test will fail anyway
fmt.Fprint(os.Stdout, "command not found")
+ //nolint:revive // error code is important for this "test"
os.Exit(1)
- }
+ //nolint:revive // error code is important for this "test"
os.Exit(0)
}
diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md
index 9c4eb3645d491..f4ca7243b8cde 100644
--- a/plugins/inputs/cisco_telemetry_mdt/README.md
+++ b/plugins/inputs/cisco_telemetry_mdt/README.md
@@ -2,7 +2,7 @@
Cisco model-driven telemetry (MDT) is an input plugin that consumes telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports.
-GRPC-based transport can utilize TLS for authentication and encryption.
+RPC-based transport can utilize TLS for authentication and encryption.
Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded.
The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms.
@@ -21,6 +21,9 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l
## Address and port to host telemetry listener
service_address = ":57000"
+ ## GRPC maximum message size; the default is 4MB, increase it as needed.
+ max_msg_size = 20000000
+
## Enable TLS; grpc transport only.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
@@ -35,6 +38,19 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l
## Define aliases to map telemetry encoding paths to simple measurement names
[inputs.cisco_telemetry_mdt.aliases]
ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+ [inputs.cisco_telemetry_mdt.dmes]
+# Global Property Xformation.
+# prop1 = "uint64 to int"
+# prop2 = "uint64 to string"
+# prop3 = "string to uint64"
+# prop4 = "string to int64"
+# prop5 = "string to float64"
+# auto-prop-xfrom = "auto-float-xfrom" #Xform any string property that holds a float number to type float64
+# Per-path property xformation: Name is the telemetry configuration under sensor-group, path configuration "WORD Distinguished Name"
+# Per-path configuration is preferred because it avoids property type collisions.
+# dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' ``` ### Example Output: @@ -42,3 +58,47 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 ``` + +### NX-OS Configuration Example: +``` +Requirement DATA-SOURCE Configuration +----------------------------------------- +Environment DME path sys/ch query-condition query-target=subtree&target-subtree-class=eqptPsuSlot,eqptFtSlot,eqptSupCSlot,eqptPsu,eqptFt,eqptSensor,eqptLCSlot + DME path sys/ch depth 5 (Another configuration option) +Environment NXAPI show environment power + NXAPI show environment fan + NXAPI show environment temperature +Interface Stats DME path sys/intf query-condition query-target=subtree&target-subtree-class=rmonIfIn,rmonIfOut,rmonIfHCIn,rmonIfHCOut,rmonEtherStats +Interface State DME path sys/intf depth unbounded query-condition query-target=subtree&target-subtree-class=l1PhysIf,pcAggrIf,l3EncRtdIf,l3LbRtdIf,ethpmPhysIf +VPC DME path sys/vpc query-condition query-target=subtree&target-subtree-class=vpcDom,vpcIf +Resources cpu DME path sys/procsys query-condition query-target=subtree&target-subtree-class=procSystem,procSysCore,procSysCpuSummary,procSysCpu,procIdle,procIrq,procKernel,procNice,procSoftirq,procTotal,procUser,procWait,procSysCpuHistory,procSysLoad +Resources Mem DME path sys/procsys/sysmem/sysmemused + path sys/procsys/sysmem/sysmemusage + path sys/procsys/sysmem/sysmemfree +Per Process cpu DME path sys/proc depth unbounded query-condition rsp-foreign-subtree=ephemeral +vxlan(svi stats) DME path sys/bd query-condition query-target=subtree&target-subtree-class=l2VlanStats +BGP DME path sys/bgp query-condition query-target=subtree&target-subtree-class=bgpDom,bgpPeer,bgpPeerAf,bgpDomAf,bgpPeerAfEntry,bgpOperRtctrlL3,bgpOperRttP,bgpOperRttEntry,bgpOperAfCtrl +mac dynamic DME path sys/mac query-condition query-target=subtree&target-subtree-class=l2MacAddressTable +bfd DME path sys/bfd/inst depth unbounded +lldp DME path sys/lldp depth unbounded +urib DME path sys/urib depth unbounded query-condition rsp-foreign-subtree=ephemeral +u6rib DME path sys/u6rib depth unbounded query-condition rsp-foreign-subtree=ephemeral +multicast flow DME path 
sys/mca/show/flows depth unbounded +multicast stats DME path sys/mca/show/stats depth unbounded +multicast igmp NXAPI show ip igmp groups vrf all +multicast igmp NXAPI show ip igmp interface vrf all +multicast igmp NXAPI show ip igmp snooping +multicast igmp NXAPI show ip igmp snooping groups +multicast igmp NXAPI show ip igmp snooping groups detail +multicast igmp NXAPI show ip igmp snooping groups summary +multicast igmp NXAPI show ip igmp snooping mrouter +multicast igmp NXAPI show ip igmp snooping statistics +multicast pim NXAPI show ip pim interface vrf all +multicast pim NXAPI show ip pim neighbor vrf all +multicast pim NXAPI show ip pim route vrf all +multicast pim NXAPI show ip pim rp vrf all +multicast pim NXAPI show ip pim statistics vrf all +multicast pim NXAPI show ip pim vrf all + + +``` diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 1a669e96f878e..25b5ec9758962 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -3,6 +3,7 @@ package cisco_telemetry_mdt import ( "bytes" "encoding/binary" + "encoding/json" "fmt" "io" "net" @@ -14,15 +15,16 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + _ "google.golang.org/grpc/encoding/gzip" // Register GRPC gzip decoder to support compressed telemetry + "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" // Register GRPC gzip decoder to support compressed telemetry - _ "google.golang.org/grpc/encoding/gzip" - "google.golang.org/grpc/peer" ) const ( @@ -37,6 +39,7 @@ type CiscoTelemetryMDT struct { ServiceAddress string `toml:"service_address"` MaxMsgSize int `toml:"max_msg_size"` Aliases map[string]string `toml:"aliases"` + Dmes map[string]string `toml:"dmes"` EmbeddedTags []string `toml:"embedded_tags"` Log telegraf.Logger @@ -49,12 +52,26 @@ type CiscoTelemetryMDT struct { listener net.Listener // Internal state - aliases map[string]string - warned map[string]struct{} - extraTags map[string]map[string]struct{} - mutex sync.Mutex - acc telegraf.Accumulator - wg sync.WaitGroup + internalAliases map[string]string + dmesFuncs map[string]string + warned map[string]struct{} + extraTags map[string]map[string]struct{} + nxpathMap map[string]map[string]string //per path map + propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{} + mutex sync.Mutex + acc telegraf.Accumulator + wg sync.WaitGroup + + // Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility + dialout.UnimplementedGRPCMdtDialoutServer +} + +type NxPayloadXfromStructure struct { + Name string `json:"Name"` + Prop []struct { + Key string `json:"Key"` + Value string `json:"Value"` + } `json:"prop"` } // Start the Cisco MDT service @@ -66,11 +83,54 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { return err } + c.propMap = make(map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}, 100) + c.propMap["test"] = nxosValueXformUint64Toint64 + c.propMap["asn"] 
= nxosValueXformUint64ToString //uint64 to string. + c.propMap["subscriptionId"] = nxosValueXformUint64ToString //uint64 to string. + c.propMap["operState"] = nxosValueXformUint64ToString //uint64 to string. + // Invert aliases list c.warned = make(map[string]struct{}) - c.aliases = make(map[string]string, len(c.Aliases)) - for alias, path := range c.Aliases { - c.aliases[path] = alias + c.internalAliases = make(map[string]string, len(c.Aliases)) + for alias, encodingPath := range c.Aliases { + c.internalAliases[encodingPath] = alias + } + c.initDb() + + c.dmesFuncs = make(map[string]string, len(c.Dmes)) + for dme, dmeKey := range c.Dmes { + c.dmesFuncs[dmeKey] = dme + switch dmeKey { + case "uint64 to int": + c.propMap[dme] = nxosValueXformUint64Toint64 + case "uint64 to string": + c.propMap[dme] = nxosValueXformUint64ToString + case "string to float64": + c.propMap[dme] = nxosValueXformStringTofloat + case "string to uint64": + c.propMap[dme] = nxosValueXformStringToUint64 + case "string to int64": + c.propMap[dme] = nxosValueXformStringToInt64 + case "auto-float-xfrom": + c.propMap[dme] = nxosValueAutoXformFloatProp + default: + if !strings.HasPrefix(dme, "dnpath") { // not path based property map + continue + } + + var jsStruct NxPayloadXfromStructure + err := json.Unmarshal([]byte(dmeKey), &jsStruct) + if err != nil { + continue + } + + // Build 2 level Hash nxpathMap Key = jsStruct.Name, Value = map of jsStruct.Prop + // It will override the default of code if same path is provided in configuration. + c.nxpathMap[jsStruct.Name] = make(map[string]string, len(jsStruct.Prop)) + for _, prop := range jsStruct.Prop { + c.nxpathMap[jsStruct.Name][prop.Key] = prop.Value + } + } } // Fill extra tags @@ -96,6 +156,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { var opts []grpc.ServerOption tlsConfig, err := c.ServerConfig.TLSConfig() if err != nil { + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() return err } else if tlsConfig != nil { @@ -111,11 +172,14 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { c.wg.Add(1) go func() { - c.grpcServer.Serve(c.listener) + if err := c.grpcServer.Serve(c.listener); err != nil { + c.Log.Errorf("serving GRPC server failed: %v", err) + } c.wg.Done() }() default: + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() return fmt.Errorf("invalid Cisco MDT transport: %s", c.Transport) } @@ -154,7 +218,9 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() { delete(clients, conn) mutex.Unlock() - conn.Close() + if err := conn.Close(); err != nil { + c.Log.Warnf("closing connection failed: %v", err) + } c.wg.Done() }() } @@ -214,9 +280,9 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error { // MdtDialout RPC server method for grpc-dialout transport func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error { - peer, peerOK := peer.FromContext(stream.Context()) + peerInCtx, peerOK := peer.FromContext(stream.Context()) if peerOK { - c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr) } var chunkBuffer bytes.Buffer @@ -239,7 +305,9 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS if packet.TotalSize == 0 { c.handleTelemetry(packet.Data) } else if int(packet.TotalSize) <= c.MaxMsgSize { - chunkBuffer.Write(packet.Data) + if _, err := 
chunkBuffer.Write(packet.Data); err != nil {
+ c.acc.AddError(fmt.Errorf("writing packet %q failed: %v", packet.Data, err))
+ }
if chunkBuffer.Len() >= int(packet.TotalSize) {
c.handleTelemetry(chunkBuffer.Bytes())
chunkBuffer.Reset()
@@ -250,7 +318,7 @@
}
if peerOK {
- c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr)
+ c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr)
}
return nil
@@ -261,7 +329,7 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) {
msg := &telemetry.Telemetry{}
err := proto.Unmarshal(data, msg)
if err != nil {
- c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err))
+ c.acc.AddError(fmt.Errorf("failed to decode: %v", err))
return
}
@@ -296,7 +364,9 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) {
// Parse keys
tags = make(map[string]string, len(keys.Fields)+3)
tags["source"] = msg.GetNodeIdStr()
- tags["subscription"] = msg.GetSubscriptionIdStr()
+ if msgID := msg.GetSubscriptionIdStr(); msgID != "" {
+ tags["subscription"] = msgID
+ }
tags["path"] = msg.GetEncodingPath()
for _, subfield := range keys.Fields {
@@ -309,8 +379,8 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) {
}
}
- for _, metric := range grouper.Metrics() {
- c.acc.AddMetric(metric)
+ for _, groupedMetric := range grouper.Metrics() {
+ c.acc.AddMetric(groupedMetric)
}
}
@@ -391,32 +461,109 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet
}
}
+func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
+ encodingPath string, tags map[string]string, timestamp time.Time) {
+ // RIB
+ measurement := encodingPath
+ for _, subfield := range field.Fields {
+ // For every table, fill the keys, which are vrfName, address and maskLen
+ switch subfield.Name {
+ case "vrfName", "address", "maskLen":
+ tags[subfield.Name] = decodeTag(subfield)
+ }
+ if value := decodeValue(subfield); value != nil {
+ if err := grouper.Add(measurement, tags, timestamp, subfield.Name, value); err != nil {
+ c.Log.Errorf("adding field %q to group failed: %v", subfield.Name, err)
+ }
+ }
+ if subfield.Name != "nextHop" {
+ continue
+ }
+ // For the next-hop table, fill the keys in the tags, which are address and vrfName
+ for _, subf := range subfield.Fields {
+ for _, ff := range subf.Fields {
+ switch ff.Name {
+ case "address", "vrfName":
+ key := "nextHop/" + ff.Name
+ tags[key] = decodeTag(ff)
+ }
+ if value := decodeValue(ff); value != nil {
+ name := "nextHop/" + ff.Name
+ if err := grouper.Add(measurement, tags, timestamp, name, value); err != nil {
+ c.Log.Errorf("adding field %q to group failed: %v", name, err)
+ }
+ }
+ }
+ }
+ }
+}
+
+func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
+ encodingPath string, tags map[string]string, timestamp time.Time) {
+ // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/
+ var nxAttributes *telemetry.TelemetryField
+ isDme := strings.Contains(encodingPath, "sys/")
+ if encodingPath == "rib" {
+ // handle the native data path "rib"
+ c.parseRib(grouper, field, encodingPath, tags, timestamp)
+ return
+ }
+ if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 {
+ return
+ }
+
+ if field.Fields[0] != nil && field.Fields[0].Fields != nil && field.Fields[0].Fields[0] != nil &&
field.Fields[0].Fields[0].Fields[0].Name != "attributes" {
+ return
+ }
+ nxAttributes = field.Fields[0].Fields[0].Fields[0].Fields[0]
+
+ for _, subfield := range nxAttributes.Fields {
+ if subfield.Name == "dn" {
+ tags["dn"] = decodeTag(subfield)
+ } else {
+ c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
+ }
+ }
+}
+
func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string,
- path string, tags map[string]string, timestamp time.Time) {
+ encodingPath string, tags map[string]string, timestamp time.Time) {
name := strings.Replace(field.Name, "-", "_", -1)
+
+ if (name == "modTs" || name == "createTs") && decodeValue(field) == "never" {
+ return
+ }
if len(name) == 0 {
name = prefix
} else if len(prefix) > 0 {
name = prefix + "/" + name
}
- extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name]
+ extraTags := c.extraTags[strings.Replace(encodingPath, "-", "_", -1)+"/"+name]
if value := decodeValue(field); value != nil {
// Do alias lookup, to shorten measurement names
- measurement := path
- if alias, ok := c.aliases[path]; ok {
+ measurement := encodingPath
+ if alias, ok := c.internalAliases[encodingPath]; ok {
measurement = alias
} else {
c.mutex.Lock()
- if _, haveWarned := c.warned[path]; !haveWarned {
- c.Log.Debugf("No measurement alias for encoding path: %s", path)
- c.warned[path] = struct{}{}
+ if _, haveWarned := c.warned[encodingPath]; !haveWarned {
+ c.Log.Debugf("No measurement alias for encoding path: %s", encodingPath)
+ c.warned[encodingPath] = struct{}{}
}
c.mutex.Unlock()
}
- grouper.Add(measurement, tags, timestamp, name, value)
+ if val := c.nxosValueXform(field, value, encodingPath); val != nil {
+ if err := grouper.Add(measurement, tags, timestamp, name, val); err != nil {
+ c.Log.Errorf("adding field %q to group failed: %v", name, err)
+ }
+ } else {
+ if err := grouper.Add(measurement, tags, timestamp, name, value); err != nil {
+ c.Log.Errorf("adding field %q to group failed: %v", name, err)
+ }
+ }
return
}
@@ -429,16 +576,33 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
}
var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField
- isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not
+ isNXOS := !strings.ContainsRune(encodingPath, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not
+ isEVENT := isNXOS && strings.Contains(encodingPath, "EVENT-LIST")
+ nxChildren = nil
+ nxAttributes = nil
for _, subfield := range field.Fields {
if isNXOS && subfield.Name == "attributes" && len(subfield.Fields) > 0 {
nxAttributes = subfield.Fields[0]
} else if isNXOS && subfield.Name == "children" && len(subfield.Fields) > 0 {
- nxChildren = subfield
+ if !isEVENT {
+ nxChildren = subfield
+ } else {
+ sub := subfield.Fields
+ if len(sub) > 0 && sub[0] != nil && sub[0].Fields[0].Name == "subscriptionId" && len(sub[0].Fields) >= 2 {
+ nxAttributes = sub[0].Fields[1].Fields[0].Fields[0].Fields[0].Fields[0].Fields[0]
+ }
+ }
+ // if nxAttributes == nil, this is a class-based query.
+ if nxAttributes == nil {
+ // walk over the list of class attribute fields.
+ for _, sub := range subfield.Fields {
+ c.parseClassAttributeField(grouper, sub, encodingPath, tags, timestamp)
+ }
+ }
} else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") {
nxRows = subfield
} else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding
- c.parseContentField(grouper, subfield, name, path, tags, timestamp)
+ c.parseContentField(grouper, subfield, name, encodingPath, tags, timestamp)
}
}
@@ -450,9 +614,16 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
for i, subfield := range row.Fields {
if i == 0 { // First subfield contains the index, promote it from value to tag
tags[prefix] = decodeTag(subfield)
+ // We can have a nested subfield, so handle it recursively.
+ if len(row.Fields) == 1 {
+ tags["row_number"] = strconv.FormatInt(int64(i), 10)
+ c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
+ }
} else {
- c.parseContentField(grouper, subfield, "", path, tags, timestamp)
+ c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
}
+ // With NXAPI we cannot always identify keys from the prefix
+ tags["row_number"] = strconv.FormatInt(int64(i), 10)
}
delete(tags, prefix)
}
@@ -480,14 +651,14 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
for _, subfield := range nxAttributes.Fields {
if subfield.Name != "rn" {
- c.parseContentField(grouper, subfield, "", path, tags, timestamp)
+ c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
}
}
if nxChildren != nil {
// This is a nested structure, children will inherit relative name keys of parent
for _, subfield := range nxChildren.Fields {
- c.parseContentField(grouper, subfield, prefix, path, tags, timestamp)
+ c.parseContentField(grouper, subfield, prefix, encodingPath, tags, timestamp)
}
}
delete(tags, prefix)
@@ -501,9 +672,11 @@ func (c *CiscoTelemetryMDT) Address() net.Addr {
func (c *CiscoTelemetryMDT) Stop() {
if c.grpcServer != nil {
// Stop server and terminate all running dialout routines
+ //nolint:errcheck,revive // we cannot do anything if the stopping fails
c.grpcServer.Stop()
}
if c.listener != nil {
+ //nolint:errcheck,revive // we cannot do anything if the closing fails
c.listener.Close()
}
c.wg.Wait()
@@ -531,6 +704,10 @@ const sampleConfig = `
## Define aliases to map telemetry encoding paths to simple measurement names
[inputs.cisco_telemetry_mdt.aliases]
ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+ ## Define property xformations; refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for model details.
+ [inputs.cisco_telemetry_mdt.dmes] + ModTs = "ignore" + CreateTs = "ignore" ` // SampleConfig of plugin diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index ea200bc744a7d..90fc949276948 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -4,15 +4,17 @@ import ( "context" "encoding/binary" "errors" + "io" "net" "testing" dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" - telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" - "github.com/influxdata/telegraf/testutil" + telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + + "github.com/influxdata/telegraf/testutil" ) func TestHandleTelemetryTwoSimple(t *testing.T) { @@ -22,55 +24,55 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, }, { Name: "uint64", - ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234}, + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 1234}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "bool", - ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true}, + ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: true}, }, }, }, }, }, { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str2"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "bool", - ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, + ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false}, }, }, }, @@ -78,7 +80,8 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -99,26 +102,26 @@ func TestHandleTelemetrySingleNested(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry 
:= &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/nested/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "nested", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "key", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "level", - ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3}, + ValueByType: &telemetryBis.TelemetryField_DoubleValue{DoubleValue: 3}, }, }, }, @@ -128,16 +131,16 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "nested", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "value", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -149,7 +152,8 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -166,49 +170,49 @@ func TestHandleEmbeddedTags(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/extra", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "list", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry1"}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { Name: "list", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - 
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry2"}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -218,7 +222,8 @@ func TestHandleEmbeddedTags(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -238,57 +243,57 @@ func TestHandleNXAPI(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "show nxapi", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "TABLE_nxapi", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "ROW_nxapi", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i2"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -306,19 +311,179 @@ func TestHandleNXAPI(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) - tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"} + tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "row_number": "0", "source": "hostname", "subscription": "subscription"} fields1 := map[string]interface{}{"value": "foo"} - tags2 := map[string]string{"path": "show nxapi", 
"foo": "bar", "TABLE_nxapi": "i2", "source": "hostname", "subscription": "subscription"} + tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "row_number": "0", "source": "hostname", "subscription": "subscription"} fields2 := map[string]interface{}{"value": "bar"} acc.AssertContainsTaggedFields(t, "nxapi", fields1, tags1) acc.AssertContainsTaggedFields(t, "nxapi", fields2, tags2) } +func TestHandleNXAPIXformNXAPI(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetryBis.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "show processes cpu", + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "keys", + Fields: []*telemetryBis.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "TABLE_process_cpu", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "ROW_process_cpu", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "index", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, + }, + { + Name: "value", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, err := proto.Marshal(telemetry) + require.NoError(t, err) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "show processes cpu", "foo": "bar", "TABLE_process_cpu": "i1", "row_number": "0", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + acc.AssertContainsTaggedFields(t, "show processes cpu", fields1, tags1) +} + +func TestHandleNXXformMulti(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/lldp"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetryBis.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "sys/lldp", + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "keys", + Fields: []*telemetryBis.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "fooEntity", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "attributes", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "rn", + ValueByType: 
&telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, + }, + { + Name: "portIdV", + ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: 12}, + }, + { + Name: "portDesc", + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 100}, + }, + { + Name: "test", + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, + }, + { + Name: "subscriptionId", + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, err := proto.Marshal(telemetry) + require.NoError(t, err) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + //validate various transformation scenaarios newly added in the code. + fields := map[string]interface{}{"portIdV": "12", "portDesc": "100", "test": int64(281474976710655), "subscriptionId": "2814749767106551"} + acc.AssertContainsFields(t, "dme", fields) +} + func TestHandleNXDME(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}} acc := &testutil.Accumulator{} @@ -326,45 +491,45 @@ func TestHandleNXDME(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "sys/dme", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "fooEntity", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "attributes", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "rn", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, @@ -382,7 +547,8 @@ func TestHandleNXDME(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -409,39 +575,40 @@ func TestTCPDialoutOverflow(t *testing.T) { addr := c.Address() conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - binary.Write(conn, binary.BigEndian, hdr) - conn.Read([]byte{0}) - conn.Close() + 
require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn.Close()) c.Stop() require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) } -func mockTelemetryMessage() *telemetry.Telemetry { - return &telemetry.Telemetry{ +func mockTelemetryMessage() *telemetryBis.Telemetry { + return &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "value", - ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1}, + ValueByType: &telemetryBis.TelemetryField_Sint64Value{Sint64Value: -1}, }, }, }, @@ -472,32 +639,42 @@ func TestTCPDialoutMultiple(t *testing.T) { conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn, binary.BigEndian, hdr) - conn.Write(data) + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Write(data) + require.NoError(t, err) conn2, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) telemetry.EncodingPath = "type:model/parallel/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn2, binary.BigEndian, hdr) - conn2.Write(data) - conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) - conn2.Read([]byte{0}) - conn2.Close() + require.NoError(t, binary.Write(conn2, binary.BigEndian, hdr)) + _, err = conn2.Write(data) + require.NoError(t, err) + _, err = conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + require.NoError(t, err) + _, err = conn2.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn2.Close()) telemetry.EncodingPath = "type:model/other/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn, binary.BigEndian, hdr) - conn.Write(data) - conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) - conn.Read([]byte{0}) + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Write(data) + require.NoError(t, err) + _, err = conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + require.NoError(t, err) + _, err = conn.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) c.Stop() - conn.Close() + require.NoError(t, conn.Close()) // We use the invalid dialout flags to let the server close the connection require.Equal(t, acc.Errors, 
[]error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) @@ -522,15 +699,18 @@ func TestGRPCDialoutError(t *testing.T) { require.NoError(t, err) addr := c.Address() - conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure()) + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure()) + require.NoError(t, err) client := dialout.NewGRPCMdtDialoutClient(conn) - stream, _ := client.MdtDialout(context.Background()) + stream, err := client.MdtDialout(context.Background()) + require.NoError(t, err) args := &dialout.MdtDialoutArgs{Errors: "foobar"} - stream.Send(args) + require.NoError(t, stream.Send(args)) // Wait for the server to close - stream.Recv() + _, err = stream.Recv() + require.True(t, err == nil || err == io.EOF) c.Stop() require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) @@ -545,35 +725,44 @@ func TestGRPCDialoutMultiple(t *testing.T) { telemetry := mockTelemetryMessage() addr := c.Address() - conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) client := dialout.NewGRPCMdtDialoutClient(conn) - stream, _ := client.MdtDialout(context.TODO()) + stream, err := client.MdtDialout(context.TODO()) + require.NoError(t, err) - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} - stream.Send(args) + require.NoError(t, stream.Send(args)) - conn2, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + conn2, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) client2 := dialout.NewGRPCMdtDialoutClient(conn2) - stream2, _ := client2.MdtDialout(context.TODO()) + stream2, err := client2.MdtDialout(context.TODO()) + require.NoError(t, err) telemetry.EncodingPath = "type:model/parallel/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) args = &dialout.MdtDialoutArgs{Data: data} - stream2.Send(args) - stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) - stream2.Recv() - conn2.Close() + require.NoError(t, stream2.Send(args)) + require.NoError(t, stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + _, err = stream2.Recv() + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn2.Close()) telemetry.EncodingPath = "type:model/other/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) args = &dialout.MdtDialoutArgs{Data: data} - stream.Send(args) - stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) - stream.Recv() + require.NoError(t, stream.Send(args)) + require.NoError(t, stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + _, err = stream.Recv() + require.True(t, err == nil || err == io.EOF) c.Stop() - conn.Close() + require.NoError(t, conn.Close()) require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) @@ -588,5 +777,4 @@ func TestGRPCDialoutMultiple(t *testing.T) { tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"} fields = map[string]interface{}{"value": int64(-1)} acc.AssertContainsTaggedFields(t, "other", fields, tags) - } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go 
b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go new file mode 100644 index 0000000000000..1d7d95a95a757 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -0,0 +1,876 @@ +package cisco_telemetry_mdt + +import ( + "strconv" + "strings" + + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" +) + +//xform Field to string +func xformValueString(field *telemetry.TelemetryField) string { + var str string + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if len(val.StringValue) > 0 { + return val.StringValue + } + case *telemetry.TelemetryField_Uint32Value: + str = strconv.FormatUint(uint64(val.Uint32Value), 10) + return str + case *telemetry.TelemetryField_Uint64Value: + str = strconv.FormatUint(val.Uint64Value, 10) + return str + case *telemetry.TelemetryField_Sint32Value: + str = strconv.FormatInt(int64(val.Sint32Value), 10) + return str + case *telemetry.TelemetryField_Sint64Value: + str = strconv.FormatInt(val.Sint64Value, 10) + return str + } + return "" +} + +//xform Uint64 to int64 +func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interface{}) interface{} { + if field.GetUint64Value() != 0 { + return int64(value.(uint64)) + } + return nil +} + +//xform string to float +func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{}) interface{} { + //convert property to float from string. + vals := field.GetStringValue() + if vals != "" { + if valf, err := strconv.ParseFloat(vals, 64); err == nil { + return valf + } + } + return nil +} + +//xform string to uint64 +func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}) interface{} { + //string to uint64 + vals := field.GetStringValue() + if vals != "" { + if val64, err := strconv.ParseUint(vals, 10, 64); err == nil { + return val64 + } + } + return nil +} + +//xform string to int64 +func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{}) interface{} { + //string to int64 + vals := field.GetStringValue() + if vals != "" { + if val64, err := strconv.ParseInt(vals, 10, 64); err == nil { + return val64 + } + } + return nil +} + +//auto-xform float properties +func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, _ interface{}) interface{} { + //check if we want auto xformation + vals := field.GetStringValue() + if vals != "" { + if valf, err := strconv.ParseFloat(vals, 64); err == nil { + return valf + } + } + return nil +} + +//xform uint64 to string +func nxosValueXformUint64ToString(field *telemetry.TelemetryField, _ interface{}) interface{} { + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if len(val.StringValue) > 0 { + return val.StringValue + } + case *telemetry.TelemetryField_Uint64Value: + return strconv.FormatUint(val.Uint64Value, 10) + } + return nil +} + +//Xform value field. +func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, value interface{}, path string) interface{} { + if strings.ContainsRune(path, ':') { + // not NXOS + return nil + } + if _, ok := c.propMap[field.Name]; ok { + return c.propMap[field.Name](field, value) + } + //check if we want auto xformation + if _, ok := c.propMap["auto-prop-xfrom"]; ok { + return c.propMap["auto-prop-xfrom"](field, value) + } + //Now check path-based conversion. + //If a mapping is found, do the required transformation.
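nxosValueXform resolves a conversion in three steps: a per-field override registered in propMap wins first, then the optional auto-transform entry, and only then the per-path type hints held in nxpathMap. A standalone sketch of that lookup order, with plain maps and a float-only hint standing in for the real tables (all names and data below are illustrative):

    package main

    import (
        "fmt"
        "strconv"
    )

    // convert mimics the lookup order: per-field override first, then the
    // per-path type hint; nil means "no mapping, keep the original value".
    func convert(path, name, raw string,
        overrides map[string]func(string) interface{},
        pathTypes map[string]map[string]string) interface{} {
        if f, ok := overrides[name]; ok {
            return f(raw)
        }
        if types := pathTypes[path]; types != nil && types[name] == "float" {
            if v, err := strconv.ParseFloat(raw, 64); err == nil {
                return v
            }
        }
        return nil
    }

    func main() {
        pathTypes := map[string]map[string]string{"show processes cpu": {"onesec": "float"}}
        fmt.Println(convert("show processes cpu", "onesec", "3.14", nil, pathTypes)) // 3.14
    }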
+ if c.nxpathMap[path] == nil { + return nil + } + switch c.nxpathMap[path][field.Name] { + //Conversion is only supported from String, Uint32 and Uint64 + case "integer": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if vali, err := strconv.ParseInt(val.StringValue, 10, 32); err == nil { + return vali + } + case *telemetry.TelemetryField_Uint32Value: + vali, ok := value.(uint32) + if ok { + return vali + } + case *telemetry.TelemetryField_Uint64Value: + vali, ok := value.(uint64) + if ok { + return vali + } + } //switch + return nil + //Conversion is only supported from String + case "float": + //nolint:revive // switch needed for `.(type)` + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if valf, err := strconv.ParseFloat(val.StringValue, 64); err == nil { + return valf + } + } //switch + return nil + case "string": + return xformValueString(field) + case "int64": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if vali, err := strconv.ParseInt(val.StringValue, 10, 64); err == nil { + return vali + } + case *telemetry.TelemetryField_Uint64Value: + return int64(value.(uint64)) + } //switch + } //switch + return nil +} + +func (c *CiscoTelemetryMDT) initMemPhys() { + c.nxpathMap["show processes memory physical"] = map[string]string{"processname": "string"} +} + +func (c *CiscoTelemetryMDT) initBgpV4() { + key := "show bgp ipv4 unicast" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["aspath"] = "string" +} + +func (c *CiscoTelemetryMDT) initCPU() { + key := "show processes cpu" + c.nxpathMap[key] = make(map[string]string, 5) + c.nxpathMap[key]["kernel_percent"] = "float" + c.nxpathMap[key]["idle_percent"] = "float" + c.nxpathMap[key]["process"] = "string" + c.nxpathMap[key]["user_percent"] = "float" + c.nxpathMap[key]["onesec"] = "float" +} + +func (c *CiscoTelemetryMDT) initResources() { + key := "show system resources" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["cpu_state_user"] = "float" + c.nxpathMap[key]["kernel"] = "float" + c.nxpathMap[key]["current_memory_status"] = "string" + c.nxpathMap[key]["load_avg_15min"] = "float" + c.nxpathMap[key]["idle"] = "float" + c.nxpathMap[key]["load_avg_1min"] = "float" + c.nxpathMap[key]["user"] = "float" + c.nxpathMap[key]["cpu_state_idle"] = "float" + c.nxpathMap[key]["load_avg_5min"] = "float" + c.nxpathMap[key]["cpu_state_kernel"] = "float" +} + +func (c *CiscoTelemetryMDT) initPower() { + key := "show environment power" + c.nxpathMap[key] = make(map[string]string, 100) + c.nxpathMap[key]["reserve_sup"] = "string" + c.nxpathMap[key]["det_volt"] = "string" + c.nxpathMap[key]["heatsink_temp"] = "string" + c.nxpathMap[key]["det_pintot"] = "string" + c.nxpathMap[key]["det_iinb"] = "string" + c.nxpathMap[key]["ps_input_current"] = "string" + c.nxpathMap[key]["modnum"] = "string" + c.nxpathMap[key]["trayfannum"] = "string" + c.nxpathMap[key]["modstatus_3k"] = "string" + c.nxpathMap[key]["fan2rpm"] = "string" + c.nxpathMap[key]["amps_alloced"] = "string" + c.nxpathMap[key]["all_inlets_connected"] = "string" + c.nxpathMap[key]["tot_pow_out_actual_draw"] = "string" + c.nxpathMap[key]["ps_redun_op_mode"] = "string" + c.nxpathMap[key]["curtemp"] = "string" + c.nxpathMap[key]["mod_model"] = "string" + c.nxpathMap[key]["fanmodel"] = "string" + c.nxpathMap[key]["ps_output_current"] = "string" + c.nxpathMap[key]["majthres"] = "string" + c.nxpathMap[key]["input_type"] = "string" +
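The init* functions in this file register hundreds of per-path type hints one assignment at a time. A table-driven helper would shrink them considerably; a possible refactor, not part of this PR:

    // setPathTypes registers one type hint for many field names at once.
    func (c *CiscoTelemetryMDT) setPathTypes(path, typ string, names ...string) {
        m := c.nxpathMap[path]
        if m == nil {
            m = make(map[string]string, len(names))
            c.nxpathMap[path] = m
        }
        for _, n := range names {
            m[n] = typ
        }
    }

    // usage:
    // c.setPathTypes("show environment power", "string", "reserve_sup", "det_volt", "heatsink_temp")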
c.nxpathMap[key]["allocated"] = "string" + c.nxpathMap[key]["fanhwver"] = "string" + c.nxpathMap[key]["clkhwver"] = "string" + c.nxpathMap[key]["fannum"] = "string" + c.nxpathMap[key]["watts_requested"] = "string" + c.nxpathMap[key]["cumulative_power"] = "string" + c.nxpathMap[key]["tot_gridB_capacity"] = "string" + c.nxpathMap[key]["pow_used_by_mods"] = "string" + c.nxpathMap[key]["tot_pow_alloc_budgeted"] = "string" + c.nxpathMap[key]["psumod"] = "string" + c.nxpathMap[key]["ps_status_3k"] = "string" + c.nxpathMap[key]["temptype"] = "string" + c.nxpathMap[key]["regval"] = "string" + c.nxpathMap[key]["inlet_temp"] = "string" + c.nxpathMap[key]["det_cord"] = "string" + c.nxpathMap[key]["reserve_fan"] = "string" + c.nxpathMap[key]["det_pina"] = "string" + c.nxpathMap[key]["minthres"] = "string" + c.nxpathMap[key]["actual_draw"] = "string" + c.nxpathMap[key]["sensor"] = "string" + c.nxpathMap[key]["zone"] = "string" + c.nxpathMap[key]["det_iin"] = "string" + c.nxpathMap[key]["det_iout"] = "string" + c.nxpathMap[key]["det_vin"] = "string" + c.nxpathMap[key]["fan1rpm"] = "string" + c.nxpathMap[key]["tot_gridA_capacity"] = "string" + c.nxpathMap[key]["fanperc"] = "string" + c.nxpathMap[key]["det_pout"] = "string" + c.nxpathMap[key]["alarm_str"] = "string" + c.nxpathMap[key]["zonespeed"] = "string" + c.nxpathMap[key]["det_total_cap"] = "string" + c.nxpathMap[key]["reserve_xbar"] = "string" + c.nxpathMap[key]["det_vout"] = "string" + c.nxpathMap[key]["watts_alloced"] = "string" + c.nxpathMap[key]["ps_in_power"] = "string" + c.nxpathMap[key]["tot_pow_input_actual_draw"] = "string" + c.nxpathMap[key]["ps_output_voltage"] = "string" + c.nxpathMap[key]["det_name"] = "string" + c.nxpathMap[key]["tempmod"] = "string" + c.nxpathMap[key]["clockname"] = "string" + c.nxpathMap[key]["fanname"] = "string" + c.nxpathMap[key]["regnumstr"] = "string" + c.nxpathMap[key]["bitnumstr"] = "string" + c.nxpathMap[key]["ps_slot"] = "string" + c.nxpathMap[key]["actual_out"] = "string" + c.nxpathMap[key]["ps_input_voltage"] = "string" + c.nxpathMap[key]["psmodel"] = "string" + c.nxpathMap[key]["speed"] = "string" + c.nxpathMap[key]["clkmodel"] = "string" + c.nxpathMap[key]["ps_redun_mode_3k"] = "string" + c.nxpathMap[key]["tot_pow_capacity"] = "string" + c.nxpathMap[key]["amps"] = "string" + c.nxpathMap[key]["available_pow"] = "string" + c.nxpathMap[key]["reserve_supxbarfan"] = "string" + c.nxpathMap[key]["watts"] = "string" + c.nxpathMap[key]["det_pinb"] = "string" + c.nxpathMap[key]["det_vinb"] = "string" + c.nxpathMap[key]["ps_state"] = "string" + c.nxpathMap[key]["det_sw_alarm"] = "string" + c.nxpathMap[key]["regnum"] = "string" + c.nxpathMap[key]["amps_requested"] = "string" + c.nxpathMap[key]["fanrpm"] = "string" + c.nxpathMap[key]["actual_input"] = "string" + c.nxpathMap[key]["outlet_temp"] = "string" + c.nxpathMap[key]["tot_capa"] = "string" +} + +func (c *CiscoTelemetryMDT) initPtpCorrection() { + key := "show ptp corrections" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["sup-time"] = "string" + c.nxpathMap[key]["correction-val"] = "int64" + c.nxpathMap[key]["ptp-header"] = "string" + c.nxpathMap[key]["intf-name"] = "string" + c.nxpathMap[key]["ptp-end"] = "string" +} + +func (c *CiscoTelemetryMDT) initTrans() { + key := "show interface transceiver details" + c.nxpathMap[key] = make(map[string]string, 100) + c.nxpathMap[key]["uncorrect_ber_alrm_hi"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_warn_lo"] = "string" + c.nxpathMap[key]["current_warn_lo"] = "float" + 
c.nxpathMap[key]["pre_fec_ber_max_alrm_hi"] = "string" + c.nxpathMap[key]["serialnum"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc_warn_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_max_warn_lo"] = "string" + c.nxpathMap[key]["laser_temp_warn_hi"] = "float" + c.nxpathMap[key]["type"] = "string" + c.nxpathMap[key]["rx_pwr_0"] = "float" + c.nxpathMap[key]["rx_pwr_warn_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_warn_hi"] = "string" + c.nxpathMap[key]["qsfp_or_cfp"] = "string" + c.nxpathMap[key]["protocol_type"] = "string" + c.nxpathMap[key]["uncorrect_ber"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_alrm_hi"] = "string" + c.nxpathMap[key]["tec_current"] = "float" + c.nxpathMap[key]["pre_fec_ber"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_warn_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_min"] = "string" + c.nxpathMap[key]["current_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_warn_lo"] = "string" + c.nxpathMap[key]["snr_warn_lo"] = "float" + c.nxpathMap[key]["rev"] = "string" + c.nxpathMap[key]["laser_temp_alrm_lo"] = "float" + c.nxpathMap[key]["current"] = "float" + c.nxpathMap[key]["rx_pwr_1"] = "float" + c.nxpathMap[key]["tec_current_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_warn_lo"] = "string" + c.nxpathMap[key]["cisco_part_number"] = "string" + c.nxpathMap[key]["uncorrect_ber_acc_warn_hi"] = "string" + c.nxpathMap[key]["temp_warn_hi"] = "float" + c.nxpathMap[key]["laser_freq_warn_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_max_alrm_lo"] = "string" + c.nxpathMap[key]["snr_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_alrm_lo"] = "string" + c.nxpathMap[key]["tx_pwr_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_min_warn_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_warn_hi"] = "string" + c.nxpathMap[key]["rx_pwr_alrm_hi"] = "float" + c.nxpathMap[key]["tec_current_warn_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_alrm_hi"] = "string" + c.nxpathMap[key]["rx_pwr_4"] = "float" + c.nxpathMap[key]["uncorrect_ber_cur"] = "string" + c.nxpathMap[key]["pre_fec_ber_alrm_hi"] = "string" + c.nxpathMap[key]["rx_pwr_warn_lo"] = "float" + c.nxpathMap[key]["bit_encoding"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc"] = "string" + c.nxpathMap[key]["sfp"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc_alrm_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_min"] = "string" + c.nxpathMap[key]["current_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_max_alrm_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_warn_hi"] = "string" + c.nxpathMap[key]["current_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_acc_alrm_lo"] = "string" + c.nxpathMap[key]["snr_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc"] = "string" + c.nxpathMap[key]["tx_len"] = "string" + c.nxpathMap[key]["uncorrect_ber_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_alrm_lo"] = "string" + c.nxpathMap[key]["txcvr_type"] = "string" + c.nxpathMap[key]["tec_current_alrm_lo"] = "float" + c.nxpathMap[key]["volt_alrm_lo"] = "float" + c.nxpathMap[key]["temp_alrm_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_min_warn_lo"] = "string" + c.nxpathMap[key]["laser_freq"] = "float" + c.nxpathMap[key]["uncorrect_ber_min_warn_hi"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_max_warn_hi"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["fiber_type_byte0"] = "string" + c.nxpathMap[key]["laser_freq_alrm_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_warn_hi"] = 
"string" + c.nxpathMap[key]["partnum"] = "string" + c.nxpathMap[key]["snr"] = "float" + c.nxpathMap[key]["volt_alrm_hi"] = "float" + c.nxpathMap[key]["connector_type"] = "string" + c.nxpathMap[key]["tx_medium"] = "string" + c.nxpathMap[key]["tx_pwr_warn_hi"] = "float" + c.nxpathMap[key]["cisco_vendor_id"] = "string" + c.nxpathMap[key]["cisco_ext_id"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_warn_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_max"] = "string" + c.nxpathMap[key]["uncorrect_ber_min_alrm_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_warn_hi"] = "string" + c.nxpathMap[key]["tx_pwr_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_warn_lo"] = "string" + c.nxpathMap[key]["10gbe_code"] = "string" + c.nxpathMap[key]["cable_type"] = "string" + c.nxpathMap[key]["laser_freq_alrm_hi"] = "float" + c.nxpathMap[key]["rx_pwr_3"] = "float" + c.nxpathMap[key]["rx_pwr"] = "float" + c.nxpathMap[key]["volt_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_alrm_hi"] = "string" + c.nxpathMap[key]["temperature"] = "float" + c.nxpathMap[key]["voltage"] = "float" + c.nxpathMap[key]["tx_pwr"] = "float" + c.nxpathMap[key]["laser_temp_alrm_hi"] = "float" + c.nxpathMap[key]["tx_speeds"] = "string" + c.nxpathMap[key]["uncorrect_ber_min_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_alrm_hi"] = "string" + c.nxpathMap[key]["ciscoid"] = "string" + c.nxpathMap[key]["tx_pwr_warn_lo"] = "float" + c.nxpathMap[key]["cisco_product_id"] = "string" + c.nxpathMap[key]["info_not_available"] = "string" + c.nxpathMap[key]["laser_temp"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur"] = "string" + c.nxpathMap[key]["fiber_type_byte1"] = "string" + c.nxpathMap[key]["tx_type"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_warn_lo"] = "string" + c.nxpathMap[key]["temp_alrm_lo"] = "float" + c.nxpathMap[key]["volt_warn_lo"] = "float" + c.nxpathMap[key]["rx_pwr_alrm_lo"] = "float" + c.nxpathMap[key]["rx_pwr_2"] = "float" + c.nxpathMap[key]["tec_current_alrm_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_alrm_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_alrm_hi"] = "string" + c.nxpathMap[key]["temp_warn_lo"] = "float" + c.nxpathMap[key]["snr_warn_hi"] = "float" + c.nxpathMap[key]["laser_temp_warn_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_acc_warn_hi"] = "string" + c.nxpathMap[key]["laser_freq_warn_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_max"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmp() { + key := "show ip igmp groups vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["group-type"] = "string" + c.nxpathMap[key]["translate"] = "string" + c.nxpathMap[key]["sourceaddress"] = "string" + c.nxpathMap[key]["vrf-cntxt"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["group-addr"] = "string" + c.nxpathMap[key]["uptime"] = "string" +} + +func (c *CiscoTelemetryMDT) initVrfAll() { + key := "show ip igmp interface vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["static-group-map"] = "string" + c.nxpathMap[key]["rll"] = "string" + c.nxpathMap[key]["host-proxy"] = "string" + c.nxpathMap[key]["il"] = "string" + c.nxpathMap[key]["join-group-map"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["host-proxy-group-map"] = "string" + c.nxpathMap[key]["next-query"] = "string" + c.nxpathMap[key]["q-ver"] = "string" + c.nxpathMap[key]["if-status"] = "string" + 
c.nxpathMap[key]["un-solicited"] = "string" + c.nxpathMap[key]["ip-sum"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoop() { + key := "show ip igmp snooping" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["repsup"] = "string" + c.nxpathMap[key]["omf_enabled"] = "string" + c.nxpathMap[key]["v3repsup"] = "string" + c.nxpathMap[key]["grepsup"] = "string" + c.nxpathMap[key]["lkupmode"] = "string" + c.nxpathMap[key]["description"] = "string" + c.nxpathMap[key]["vlinklocalgrpsup"] = "string" + c.nxpathMap[key]["gv3repsup"] = "string" + c.nxpathMap[key]["reportfloodall"] = "string" + c.nxpathMap[key]["leavegroupaddress"] = "string" + c.nxpathMap[key]["enabled"] = "string" + c.nxpathMap[key]["omf"] = "string" + c.nxpathMap[key]["sq"] = "string" + c.nxpathMap[key]["sqr"] = "string" + c.nxpathMap[key]["eht"] = "string" + c.nxpathMap[key]["fl"] = "string" + c.nxpathMap[key]["reportfloodenable"] = "string" + c.nxpathMap[key]["snoop-on"] = "string" + c.nxpathMap[key]["glinklocalgrpsup"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroups() { + key := "show ip igmp snooping groups" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroupDetails() { + key := "show ip igmp snooping groups detail" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroupsSumm() { + key := "show ip igmp snooping groups summary" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initMrouter() { + key := "show ip igmp snooping mrouter" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["expires"] = "string" +} + +func (c *CiscoTelemetryMDT) initSnoopStats() { + key := "show ip igmp snooping statistics" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["ut"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimInterface() { + key := "show ip pim interface vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["if-is-border"] = "string" + c.nxpathMap[key]["cached_if_status"] = "string" + 
c.nxpathMap[key]["genid"] = "string" + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["last-cleared"] = "string" + c.nxpathMap[key]["is-pim-vpc-svi"] = "string" + c.nxpathMap[key]["if-addr"] = "string" + c.nxpathMap[key]["is-pim-enabled"] = "string" + c.nxpathMap[key]["pim-dr-address"] = "string" + c.nxpathMap[key]["hello-timer"] = "string" + c.nxpathMap[key]["pim-bfd-enabled"] = "string" + c.nxpathMap[key]["vpc-peer-nbr"] = "string" + c.nxpathMap[key]["nbr-policy-name"] = "string" + c.nxpathMap[key]["is-auto-enabled"] = "string" + c.nxpathMap[key]["if-status"] = "string" + c.nxpathMap[key]["jp-out-policy-name"] = "string" + c.nxpathMap[key]["if-addr-summary"] = "string" + c.nxpathMap[key]["if-dr"] = "string" + c.nxpathMap[key]["jp-in-policy-name"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimNeigh() { + key := "show ip pim neighbor vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["longest-hello-intvl"] = "string" + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["bfd-state"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimRoute() { + key := "show ip pim route vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["rpf-nbr-1"] = "string" + c.nxpathMap[key]["rpf-nbr-addr"] = "string" + c.nxpathMap[key]["register"] = "string" + c.nxpathMap[key]["sgexpire"] = "string" + c.nxpathMap[key]["oif-bf-str"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["rp-addr"] = "string" + c.nxpathMap[key]["immediate-bf-str"] = "string" + c.nxpathMap[key]["sgr-prune-list-bf-str"] = "string" + c.nxpathMap[key]["context-name"] = "string" + c.nxpathMap[key]["intf-name"] = "string" + c.nxpathMap[key]["immediate-timeout-bf-str"] = "string" + c.nxpathMap[key]["rp-local"] = "string" + c.nxpathMap[key]["sgrexpire"] = "string" + c.nxpathMap[key]["timeout-bf-str"] = "string" + c.nxpathMap[key]["timeleft"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimRp() { + key := "show ip pim rp vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["is-bsr-forward-only"] = "string" + c.nxpathMap[key]["is-rpaddr-local"] = "string" + c.nxpathMap[key]["bsr-expires"] = "string" + c.nxpathMap[key]["autorp-expire-time"] = "string" + c.nxpathMap[key]["rp-announce-policy-name"] = "string" + c.nxpathMap[key]["rp-cand-policy-name"] = "string" + c.nxpathMap[key]["is-autorp-forward-only"] = "string" + c.nxpathMap[key]["rp-uptime"] = "string" + c.nxpathMap[key]["rp-owner-flags"] = "string" + c.nxpathMap[key]["df-bits-recovered"] = "string" + c.nxpathMap[key]["bs-timer"] = "string" + c.nxpathMap[key]["rp-discovery-policy-name"] = "string" + c.nxpathMap[key]["arp-rp-addr"] = "string" + c.nxpathMap[key]["auto-rp-addr"] = "string" + c.nxpathMap[key]["autorp-expires"] = "string" + c.nxpathMap[key]["is-autorp-enabled"] = "string" + c.nxpathMap[key]["is-bsr-local"] = "string" + c.nxpathMap[key]["is-autorp-listen-only"] = "string" + c.nxpathMap[key]["autorp-dis-timer"] = "string" + c.nxpathMap[key]["bsr-rp-expires"] = "string" + c.nxpathMap[key]["static-rp-group-map"] = "string" + c.nxpathMap[key]["rp-source"] = "string" + c.nxpathMap[key]["autorp-cand-address"] = "string" + c.nxpathMap[key]["autorp-up-time"] = "string" + c.nxpathMap[key]["is-bsr-enabled"] = "string" + c.nxpathMap[key]["bsr-uptime"] = "string" + c.nxpathMap[key]["is-bsr-listen-only"] = "string" + c.nxpathMap[key]["rpf-nbr-address"] = "string" + 
c.nxpathMap[key]["is-rp-local"] = "string" + c.nxpathMap[key]["is-autorp-local"] = "string" + c.nxpathMap[key]["bsr-policy-name"] = "string" + c.nxpathMap[key]["grange-grp"] = "string" + c.nxpathMap[key]["rp-addr"] = "string" + c.nxpathMap[key]["anycast-rp-addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimStats() { + key := "show ip pim statistics vrf all" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["vrf-name"] = "string" +} + +func (c *CiscoTelemetryMDT) initIntfBrief() { + key := "show interface brief" + c.nxpathMap[key] = make(map[string]string, 2) + c.nxpathMap[key]["speed"] = "string" + c.nxpathMap[key]["vlan"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimVrf() { + key := "show ip pim vrf all" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["table-id"] = "string" +} + +func (c *CiscoTelemetryMDT) initIPMroute() { + key := "show ip mroute summary vrf all" + c.nxpathMap[key] = make(map[string]string, 40) + c.nxpathMap[key]["nat-mode"] = "string" + c.nxpathMap[key]["oif-name"] = "string" + c.nxpathMap[key]["nat-route-type"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["mofrr-nbr"] = "string" + c.nxpathMap[key]["extranet_addr"] = "string" + c.nxpathMap[key]["stale-route"] = "string" + c.nxpathMap[key]["pending"] = "string" + c.nxpathMap[key]["bidir"] = "string" + c.nxpathMap[key]["expry_timer"] = "string" + c.nxpathMap[key]["mofrr-iif"] = "string" + c.nxpathMap[key]["group_addrs"] = "string" + c.nxpathMap[key]["mpib-name"] = "string" + c.nxpathMap[key]["rpf"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["route-mdt-iod"] = "string" + c.nxpathMap[key]["sr-oif"] = "string" + c.nxpathMap[key]["stats-rate-buf"] = "string" + c.nxpathMap[key]["source_addr"] = "string" + c.nxpathMap[key]["route-iif"] = "string" + c.nxpathMap[key]["rpf-nbr"] = "string" + c.nxpathMap[key]["translated-route-src"] = "string" + c.nxpathMap[key]["group_addr"] = "string" + c.nxpathMap[key]["lisp-src-rloc"] = "string" + c.nxpathMap[key]["stats-pndg"] = "string" + c.nxpathMap[key]["rate_buf"] = "string" + c.nxpathMap[key]["extranet_vrf_name"] = "string" + c.nxpathMap[key]["fabric-interest"] = "string" + c.nxpathMap[key]["translated-route-grp"] = "string" + c.nxpathMap[key]["internal"] = "string" + c.nxpathMap[key]["oif-mpib-name"] = "string" + c.nxpathMap[key]["oif-uptime"] = "string" + c.nxpathMap[key]["omd-vpc-svi"] = "string" + c.nxpathMap[key]["source_addrs"] = "string" + c.nxpathMap[key]["stale-oif"] = "string" + c.nxpathMap[key]["core-interest"] = "string" + c.nxpathMap[key]["oif-list-bitfield"] = "string" +} + +func (c *CiscoTelemetryMDT) initIpv6Mroute() { + key := "show ipv6 mroute summary vrf all" + c.nxpathMap[key] = make(map[string]string, 40) + c.nxpathMap[key]["nat-mode"] = "string" + c.nxpathMap[key]["oif-name"] = "string" + c.nxpathMap[key]["nat-route-type"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["mofrr-nbr"] = "string" + c.nxpathMap[key]["extranet_addr"] = "string" + c.nxpathMap[key]["stale-route"] = "string" + c.nxpathMap[key]["pending"] = "string" + c.nxpathMap[key]["bidir"] = "string" + c.nxpathMap[key]["expry_timer"] = "string" + c.nxpathMap[key]["mofrr-iif"] = "string" + c.nxpathMap[key]["group_addrs"] = "string" + c.nxpathMap[key]["mpib-name"] = "string" + c.nxpathMap[key]["rpf"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["route-mdt-iod"] = "string" + c.nxpathMap[key]["sr-oif"] = "string" + c.nxpathMap[key]["stats-rate-buf"] = 
"string" + c.nxpathMap[key]["source_addr"] = "string" + c.nxpathMap[key]["route-iif"] = "string" + c.nxpathMap[key]["rpf-nbr"] = "string" + c.nxpathMap[key]["translated-route-src"] = "string" + c.nxpathMap[key]["group_addr"] = "string" + c.nxpathMap[key]["lisp-src-rloc"] = "string" + c.nxpathMap[key]["stats-pndg"] = "string" + c.nxpathMap[key]["rate_buf"] = "string" + c.nxpathMap[key]["extranet_vrf_name"] = "string" + c.nxpathMap[key]["fabric-interest"] = "string" + c.nxpathMap[key]["translated-route-grp"] = "string" + c.nxpathMap[key]["internal"] = "string" + c.nxpathMap[key]["oif-mpib-name"] = "string" + c.nxpathMap[key]["oif-uptime"] = "string" + c.nxpathMap[key]["omd-vpc-svi"] = "string" + c.nxpathMap[key]["source_addrs"] = "string" + c.nxpathMap[key]["stale-oif"] = "string" + c.nxpathMap[key]["core-interest"] = "string" + c.nxpathMap[key]["oif-list-bitfield"] = "string" +} + +func (c *CiscoTelemetryMDT) initVpc() { + key := "sys/vpc" + c.nxpathMap[key] = make(map[string]string, 5) + c.nxpathMap[key]["type2CompatQualStr"] = "string" + c.nxpathMap[key]["compatQualStr"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["issuFromVer"] = "string" + c.nxpathMap[key]["issuToVer"] = "string" +} + +func (c *CiscoTelemetryMDT) initBgp() { + key := "sys/bgp" + c.nxpathMap[key] = make(map[string]string, 18) + c.nxpathMap[key]["dynRtMap"] = "string" + c.nxpathMap[key]["nhRtMap"] = "string" + c.nxpathMap[key]["epePeerSet"] = "string" + c.nxpathMap[key]["asn"] = "string" + c.nxpathMap[key]["peerImp"] = "string" + c.nxpathMap[key]["wght"] = "string" + c.nxpathMap[key]["assocDom"] = "string" + c.nxpathMap[key]["tblMap"] = "string" + c.nxpathMap[key]["unSupprMap"] = "string" + c.nxpathMap[key]["sessionContImp"] = "string" + c.nxpathMap[key]["allocLblRtMap"] = "string" + c.nxpathMap[key]["defMetric"] = "string" + c.nxpathMap[key]["password"] = "string" + c.nxpathMap[key]["retainRttRtMap"] = "string" + c.nxpathMap[key]["clusterId"] = "string" + c.nxpathMap[key]["localAsn"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["defOrgRtMap"] = "string" +} + +func (c *CiscoTelemetryMDT) initCh() { + key := "sys/ch" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["fanName"] = "string" + c.nxpathMap[key]["typeCordConnected"] = "string" + c.nxpathMap[key]["vendor"] = "string" + c.nxpathMap[key]["model"] = "string" + c.nxpathMap[key]["rev"] = "string" + c.nxpathMap[key]["vdrId"] = "string" + c.nxpathMap[key]["hardwareAlarm"] = "string" + c.nxpathMap[key]["unit"] = "string" + c.nxpathMap[key]["hwVer"] = "string" +} + +func (c *CiscoTelemetryMDT) initIntf() { + key := "sys/intf" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["descr"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["lastStCause"] = "string" + c.nxpathMap[key]["description"] = "string" + c.nxpathMap[key]["unit"] = "string" + c.nxpathMap[key]["operFECMode"] = "string" + c.nxpathMap[key]["operBitset"] = "string" + c.nxpathMap[key]["mdix"] = "string" +} + +func (c *CiscoTelemetryMDT) initProcsys() { + key := "sys/procsys" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["id"] = "string" + c.nxpathMap[key]["upTs"] = "string" + c.nxpathMap[key]["interval"] = "string" + c.nxpathMap[key]["memstatus"] = "string" +} + +func (c *CiscoTelemetryMDT) initProc() { + key := "sys/proc" + c.nxpathMap[key] = make(map[string]string, 2) + c.nxpathMap[key]["processName"] = "string" + c.nxpathMap[key]["procArg"] = 
"string" +} + +func (c *CiscoTelemetryMDT) initBfd() { + key := "sys/bfd/inst" + c.nxpathMap[key] = make(map[string]string, 4) + c.nxpathMap[key]["descr"] = "string" + c.nxpathMap[key]["vrfName"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["name"] = "string" +} + +func (c *CiscoTelemetryMDT) initLldp() { + key := "sys/lldp" + c.nxpathMap[key] = make(map[string]string, 7) + c.nxpathMap[key]["sysDesc"] = "string" + c.nxpathMap[key]["portDesc"] = "string" + c.nxpathMap[key]["portIdV"] = "string" + c.nxpathMap[key]["chassisIdV"] = "string" + c.nxpathMap[key]["sysName"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["id"] = "string" +} + +func (c *CiscoTelemetryMDT) initDb() { + c.nxpathMap = make(map[string]map[string]string, 200) + + c.initPower() + c.initMemPhys() + c.initBgpV4() + c.initCPU() + c.initResources() + c.initPtpCorrection() + c.initTrans() + c.initIgmp() + c.initVrfAll() + c.initIgmpSnoop() + c.initIgmpSnoopGroups() + c.initIgmpSnoopGroupDetails() + c.initIgmpSnoopGroupsSumm() + c.initMrouter() + c.initSnoopStats() + c.initPimInterface() + c.initPimNeigh() + c.initPimRoute() + c.initPimRp() + c.initPimStats() + c.initIntfBrief() + c.initPimVrf() + c.initIPMroute() + c.initIpv6Mroute() + c.initVpc() + c.initBgp() + c.initCh() + c.initIntf() + c.initProcsys() + c.initProc() + c.initBfd() + c.initLldp() +} diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 187ead5cf6790..bdd4cf4730fbc 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -14,6 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -23,7 +23,7 @@ var defaultTimeout = 5 * time.Second var sampleConfig = ` ## Username for authorization on ClickHouse server - ## example: username = "default"" + ## example: username = "default" username = "default" ## Password for authorization on ClickHouse server @@ -101,20 +101,20 @@ func init() { ClientConfig: tls.ClientConfig{ InsecureSkipVerify: false, }, - Timeout: internal.Duration{Duration: defaultTimeout}, + Timeout: config.Duration(defaultTimeout), } }) } // ClickHouse Telegraf Input Plugin type ClickHouse struct { - Username string `toml:"username"` - Password string `toml:"password"` - Servers []string `toml:"servers"` - AutoDiscovery bool `toml:"auto_discovery"` - ClusterInclude []string `toml:"cluster_include"` - ClusterExclude []string `toml:"cluster_exclude"` - Timeout internal.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` + Servers []string `toml:"servers"` + AutoDiscovery bool `toml:"auto_discovery"` + ClusterInclude []string `toml:"cluster_include"` + ClusterExclude []string `toml:"cluster_exclude"` + Timeout config.Duration `toml:"timeout"` HTTPClient http.Client tls.ClientConfig } @@ -132,8 +132,8 @@ func (*ClickHouse) Description() string { // Start ClickHouse input service func (ch *ClickHouse) Start(telegraf.Accumulator) error { timeout := defaultTimeout - if ch.Timeout.Duration != 0 { - timeout = ch.Timeout.Duration + if time.Duration(ch.Timeout) != 0 { + timeout = time.Duration(ch.Timeout) } tlsCfg, err := ch.ClientConfig.TLSConfig() if err != nil { @@ -195,7 +195,6 @@ func (ch *ClickHouse) 
Gather(acc telegraf.Accumulator) (err error) { } for _, conn := range connects { - metricsFuncs := []func(acc telegraf.Accumulator, conn *connect) error{ ch.tables, ch.zookeeper, @@ -212,7 +211,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { if err := metricFunc(acc, &conn); err != nil { acc.AddError(err) } - } for metric := range commonMetrics { @@ -262,21 +260,34 @@ func (ch *ClickHouse) clusterIncludeExcludeFilter() string { } func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, metric string) error { - var result []struct { + var intResult []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` } - if err := ch.execQuery(conn.url, commonMetrics[metric], &result); err != nil { - return err + + var floatResult []struct { + Metric string `json:"metric"` + Value float64 `json:"value"` } tags := ch.makeDefaultTags(conn) - fields := make(map[string]interface{}) - for _, r := range result { - fields[internal.SnakeCase(r.Metric)] = uint64(r.Value) - } + if commonMetricsIsFloat[metric] { + if err := ch.execQuery(conn.url, commonMetrics[metric], &floatResult); err != nil { + return err + } + for _, r := range floatResult { + fields[internal.SnakeCase(r.Metric)] = r.Value + } + } else { + if err := ch.execQuery(conn.url, commonMetrics[metric], &intResult); err != nil { + return err + } + for _, r := range intResult { + fields[internal.SnakeCase(r.Metric)] = uint64(r.Value) + } + } acc.AddFields("clickhouse_"+metric, fields, tags) return nil @@ -342,7 +353,6 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) } func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) error { - var detachedParts []struct { DetachedParts chUInt64 `json:"detached_parts"` } @@ -363,7 +373,6 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err } func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) error { - var brokenDictionaries []struct { Origin string `json:"origin"` BytesAllocated chUInt64 `json:"bytes_allocated"` @@ -397,7 +406,6 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro } func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { - var mutationsStatus []struct { Failed chUInt64 `json:"failed"` Running chUInt64 `json:"running"` @@ -424,7 +432,6 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { } func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { - var disksStatus []struct { Name string `json:"name"` Path string `json:"path"` @@ -448,14 +455,12 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { }, tags, ) - } return nil } func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { - var processesStats []struct { QueryType string `json:"query_type"` Percentile50 float64 `json:"p50"` @@ -479,7 +484,6 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { }, tags, ) - } return nil @@ -568,11 +572,11 @@ func (e *clickhouseError) Error() string { return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body) } -func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error { - q := url.Query() +func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) error { + q := address.Query() q.Set("query", query+" FORMAT JSON") - url.RawQuery = q.Encode() - req, _ := http.NewRequest("GET", url.String(), nil) + 
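Because system.asynchronous_metrics is now selected with toFloat64 while events and metrics stay integral, commonMetrics decodes each family into its own target type before flattening the rows into one fields map. A standalone sketch of that two-branch decode (simplified: plain uint64 instead of the plugin's chUInt64, and metric names left unconverted rather than snake-cased):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    var isFloat = map[string]bool{"events": false, "metrics": false, "asynchronous_metrics": true}

    // decode picks the row type per metric family, then flattens the rows.
    func decode(family string, body []byte) (map[string]interface{}, error) {
        fields := make(map[string]interface{})
        if isFloat[family] {
            var rows []struct {
                Metric string  `json:"metric"`
                Value  float64 `json:"value"`
            }
            if err := json.Unmarshal(body, &rows); err != nil {
                return nil, err
            }
            for _, r := range rows {
                fields[r.Metric] = r.Value
            }
            return fields, nil
        }
        var rows []struct {
            Metric string `json:"metric"`
            Value  uint64 `json:"value"`
        }
        if err := json.Unmarshal(body, &rows); err != nil {
            return nil, err
        }
        for _, r := range rows {
            fields[r.Metric] = r.Value
        }
        return fields, nil
    }

    func main() {
        fields, err := decode("asynchronous_metrics", []byte(`[{"metric":"Uptime","value":12.5}]`))
        if err != nil {
            panic(err)
        }
        fmt.Println(fields) // map[Uptime:12.5]
    }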
address.RawQuery = q.Encode() + req, _ := http.NewRequest("GET", address.String(), nil) if ch.Username != "" { req.Header.Add("X-ClickHouse-User", ch.Username) } @@ -583,9 +587,9 @@ func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error if err != nil { return err } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() if resp.StatusCode >= 300 { - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return &clickhouseError{ StatusCode: resp.StatusCode, body: body, @@ -601,7 +605,7 @@ func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error return err } - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { return err } return nil @@ -622,9 +626,9 @@ func (i *chUInt64) UnmarshalJSON(b []byte) error { } const ( - systemEventsSQL = "SELECT event AS metric, CAST(value AS UInt64) AS value FROM system.events" - systemMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.metrics" - systemAsyncMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.asynchronous_metrics" + systemEventsSQL = "SELECT event AS metric, toUInt64(value) AS value FROM system.events" + systemMetricsSQL = "SELECT metric, toUInt64(value) AS value FROM system.metrics" + systemAsyncMetricsSQL = "SELECT metric, toFloat64(value) AS value FROM system.asynchronous_metrics" systemPartsSQL = ` SELECT database, @@ -643,18 +647,18 @@ const ( systemZookeeperRootNodesSQL = "SELECT count() AS zk_root_nodes FROM system.zookeeper WHERE path='/'" systemReplicationExistsSQL = "SELECT count() AS replication_queue_exists FROM system.tables WHERE database='system' AND name='replication_queue'" - systemReplicationNumTriesSQL = "SELECT countIf(num_tries>1) AS replication_num_tries_replicas, countIf(num_tries>100) AS replication_too_many_tries_replicas FROM system.replication_queue" + systemReplicationNumTriesSQL = "SELECT countIf(num_tries>1) AS replication_num_tries_replicas, countIf(num_tries>100) AS replication_too_many_tries_replicas FROM system.replication_queue SETTINGS empty_result_for_aggregation_by_empty_set=0" - systemDetachedPartsSQL = "SELECT count() AS detached_parts FROM system.detached_parts" + systemDetachedPartsSQL = "SELECT count() AS detached_parts FROM system.detached_parts SETTINGS empty_result_for_aggregation_by_empty_set=0" systemDictionariesSQL = "SELECT origin, status, bytes_allocated FROM system.dictionaries" - systemMutationSQL = "SELECT countIf(latest_fail_time>toDateTime('0000-00-00 00:00:00') AND is_done=0) AS failed, countIf(latest_fail_time=toDateTime('0000-00-00 00:00:00') AND is_done=0) AS running, countIf(is_done=1) AS completed FROM system.mutations" + systemMutationSQL = "SELECT countIf(latest_fail_time>toDateTime('0000-00-00 00:00:00') AND is_done=0) AS failed, countIf(latest_fail_time=toDateTime('0000-00-00 00:00:00') AND is_done=0) AS running, countIf(is_done=1) AS completed FROM system.mutations SETTINGS empty_result_for_aggregation_by_empty_set=0" systemDisksSQL = "SELECT name, path, toUInt64(100*free_space / total_space) AS free_space_percent, toUInt64( 100 * keep_free_space / total_space) AS keep_free_space_percent FROM system.disks" - systemProcessesSQL = "SELECT multiIf(positionCaseInsensitive(query,'select')=1,'select',positionCaseInsensitive(query,'insert')=1,'insert','other') AS query_type, quantile\n(0.5)(elapsed) AS p50, quantile(0.9)(elapsed) AS p90, max(elapsed) AS 
longest_running FROM system.processes GROUP BY query_type" + systemProcessesSQL = "SELECT multiIf(positionCaseInsensitive(query,'select')=1,'select',positionCaseInsensitive(query,'insert')=1,'insert','other') AS query_type, quantile\n(0.5)(elapsed) AS p50, quantile(0.9)(elapsed) AS p90, max(elapsed) AS longest_running FROM system.processes GROUP BY query_type SETTINGS empty_result_for_aggregation_by_empty_set=0" systemTextLogExistsSQL = "SELECT count() AS text_log_exists FROM system.tables WHERE database='system' AND name='text_log'" - systemTextLogSQL = "SELECT count() AS messages_last_10_min, level FROM system.text_log WHERE level <= 'Notice' AND event_time >= now() - INTERVAL 600 SECOND GROUP BY level" + systemTextLogSQL = "SELECT count() AS messages_last_10_min, level FROM system.text_log WHERE level <= 'Notice' AND event_time >= now() - INTERVAL 600 SECOND GROUP BY level SETTINGS empty_result_for_aggregation_by_empty_set=0" ) var commonMetrics = map[string]string{ @@ -663,4 +667,10 @@ var commonMetrics = map[string]string{ "asynchronous_metrics": systemAsyncMetricsSQL, } +var commonMetricsIsFloat = map[string]bool{ + "events": false, + "metrics": false, + "asynchronous_metrics": true, +} + var _ telegraf.ServiceInput = &ClickHouse{} diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go index 68a4438442d12..b342e6872c37c 100644 --- a/plugins/inputs/clickhouse/clickhouse_test.go +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -8,28 +8,28 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestClusterIncludeExcludeFilter(t *testing.T) { ch := ClickHouse{} - if assert.Equal(t, "", ch.clusterIncludeExcludeFilter()) { - ch.ClusterExclude = []string{"test_cluster"} - assert.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + require.Equal(t, "", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{"test_cluster"} + require.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{"test_cluster"} - ch.ClusterInclude = []string{"cluster"} - assert.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{"test_cluster"} + ch.ClusterInclude = []string{"cluster"} + require.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{} - ch.ClusterInclude = []string{"cluster1", "cluster2"} - assert.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{} + ch.ClusterInclude = []string{"cluster1", "cluster2"} + require.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{"cluster1", "cluster2"} - ch.ClusterInclude = []string{} - assert.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) - } + ch.ClusterExclude = []string{"cluster1", "cluster2"} + ch.ClusterInclude = []string{} + require.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) } func TestChInt64(t *testing.T) { @@ -42,9 +42,9 @@ func TestChInt64(t *testing.T) { } for src, expected := range assets { var v chUInt64 - if err := v.UnmarshalJSON([]byte(src)); assert.NoError(t, err) { - assert.Equal(t, expected, 
uint64(v)) - } + err := v.UnmarshalJSON([]byte(src)) + require.NoError(t, err) + require.Equal(t, expected, uint64(v)) } } @@ -57,7 +57,7 @@ func TestGather(t *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "system.parts"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Database string `json:"database"` Table string `json:"table"` @@ -74,8 +74,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.events"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -90,8 +91,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.metrics"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -106,8 +108,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.asynchronous_metrics"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -122,8 +125,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "zk_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkExists chUInt64 `json:"zk_exists"` }{ @@ -132,8 +136,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "zk_root_nodes"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkRootNodes chUInt64 `json:"zk_root_nodes"` }{ @@ -142,8 +147,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ReplicationQueueExists chUInt64 `json:"replication_queue_exists"` }{ @@ -152,8 +158,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "replication_too_many_tries_replicas"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"` NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"` @@ -164,8 +171,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.detached_parts"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { DetachedParts chUInt64 `json:"detached_parts"` }{ @@ -174,8 +182,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.dictionaries"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Origin string `json:"origin"` Status string `json:"status"` @@ -188,8 +197,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.mutations"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Failed chUInt64 `json:"failed"` Completed chUInt64 `json:"completed"` @@ -202,8 +212,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.disks"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Name string `json:"name"` Path string `json:"path"` @@ -218,8 +229,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.processes"): - enc.Encode(result{ + err := 
enc.Encode(result{ Data: []struct { QueryType string `json:"query_type"` Percentile50 float64 `json:"p50"` @@ -246,8 +258,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "text_log_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TextLogExists chUInt64 `json:"text_log_exists"` }{ @@ -256,8 +269,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.text_log"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Level string `json:"level"` LastMessagesLast10Min chUInt64 `json:"messages_last_10_min"` @@ -284,6 +298,7 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -294,7 +309,7 @@ func TestGather(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) acc.AssertContainsTaggedFields(t, "clickhouse_tables", map[string]interface{}{ @@ -322,8 +337,8 @@ func TestGather(t *testing.T) { ) acc.AssertContainsFields(t, "clickhouse_asynchronous_metrics", map[string]interface{}{ - "test_system_asynchronous_metric": uint64(1000), - "test_system_asynchronous_metric2": uint64(2000), + "test_system_asynchronous_metric": float64(1000), + "test_system_asynchronous_metric2": float64(2000), }, ) acc.AssertContainsFields(t, "clickhouse_zookeeper", @@ -427,7 +442,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "zk_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkExists chUInt64 `json:"zk_exists"` }{ @@ -436,8 +451,9 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ReplicationQueueExists chUInt64 `json:"replication_queue_exists"` }{ @@ -446,8 +462,9 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "text_log_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TextLogExists chUInt64 `json:"text_log_exists"` }{ @@ -456,6 +473,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -467,7 +485,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper") acc.AssertDoesNotContainMeasurement(t, "clickhouse_replication_queue") @@ -482,9 +500,10 @@ func TestWrongJSONMarshalling(t *testing.T) { } enc := json.NewEncoder(w) //wrong data section json - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct{}{}, }) + require.NoError(t, err) })) ch = &ClickHouse{ Servers: []string{ @@ -495,9 +514,9 @@ func TestWrongJSONMarshalling(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) - assert.Equal(t, 0, len(acc.Metrics)) + require.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ "clickhouse_events", "clickhouse_metrics", @@ -512,7 +531,7 @@ func TestWrongJSONMarshalling(t *testing.T) { "clickhouse_processes", "clickhouse_text_log", } - assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) + require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } 
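An aside on the `chUInt64` behaviour exercised by `TestChInt64` above: ClickHouse's `FORMAT JSON` output returns UInt64 values as quoted strings, so the plugin needs a custom unmarshaller that accepts both quoted and bare numbers. A minimal, self-contained sketch of that approach (the `quotedUInt64` name here is hypothetical, standing in for the plugin's `chUInt64`; this is an illustration, not the plugin's exact implementation):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// quotedUInt64 mirrors the role of the plugin's chUInt64: ClickHouse's
// FORMAT JSON emits UInt64 values as quoted strings ("123"), which a
// plain uint64 field would fail to decode.
type quotedUInt64 uint64

func (i *quotedUInt64) UnmarshalJSON(b []byte) error {
	// Strip surrounding quotes if present, then parse the digits.
	if len(b) > 1 && b[0] == '"' && b[len(b)-1] == '"' {
		b = b[1 : len(b)-1]
	}
	v, err := strconv.ParseUint(string(b), 10, 64)
	if err != nil {
		return err
	}
	*i = quotedUInt64(v)
	return nil
}

func main() {
	var row struct {
		Metric string       `json:"metric"`
		Value  quotedUInt64 `json:"value"`
	}
	// Quoted and bare numbers decode to the same uint64.
	for _, src := range []string{`{"metric":"Query","value":"42"}`, `{"metric":"Query","value":42}`} {
		if err := json.Unmarshal([]byte(src), &row); err != nil {
			panic(err)
		}
		fmt.Println(row.Metric, uint64(row.Value))
	}
}
```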
func TestOfflineServer(t *testing.T) { @@ -528,9 +547,9 @@ func TestOfflineServer(t *testing.T) { }, } ) - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) - assert.Equal(t, 0, len(acc.Metrics)) + require.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ "clickhouse_events", "clickhouse_metrics", @@ -545,7 +564,7 @@ func TestOfflineServer(t *testing.T) { "clickhouse_processes", "clickhouse_text_log", } - assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) + require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } func TestAutoDiscovery(t *testing.T) { @@ -555,9 +574,9 @@ func TestAutoDiscovery(t *testing.T) { Data interface{} `json:"data"` } enc := json.NewEncoder(w) - switch query := r.URL.Query().Get("query"); { - case strings.Contains(query, "system.clusters"): - enc.Encode(result{ + query := r.URL.Query().Get("query") + if strings.Contains(query, "system.clusters") { + err := enc.Encode(result{ Data: []struct { Cluster string `json:"test"` Hostname string `json:"localhost"` @@ -570,6 +589,7 @@ func TestAutoDiscovery(t *testing.T) { }, }, }) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -582,6 +602,5 @@ func TestAutoDiscovery(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) - + require.NoError(t, ch.Gather(acc)) } diff --git a/plugins/inputs/clickhouse/dev/docker-compose.yml b/plugins/inputs/clickhouse/dev/docker-compose.yml index c34ee9320d931..22fb2b2d94295 100644 --- a/plugins/inputs/clickhouse/dev/docker-compose.yml +++ b/plugins/inputs/clickhouse/dev/docker-compose.yml @@ -5,16 +5,19 @@ services: # choose `:latest` after resolving https://github.com/ClickHouse/ClickHouse/issues/13057 image: docker.io/yandex/clickhouse-server:${CLICKHOUSE_VERSION:-latest} volumes: + - ./init_schema.sql:/docker-entrypoint-initdb.d/init_schema.sql - ./test_dictionary.xml:/etc/clickhouse-server/01-test_dictionary.xml - ./zookeeper.xml:/etc/clickhouse-server/config.d/00-zookeeper.xml - ./tls_settings.xml:/etc/clickhouse-server/config.d/01-tls_settings.xml # please comment out text_log.xml when CLICKHOUSE_VERSION = 19.16 - ./text_log.xml:/etc/clickhouse-server/config.d/02-text_log.xml - ./part_log.xml:/etc/clickhouse-server/config.d/03-part_log.xml + - ./mysql_port.xml:/etc/clickhouse-server/config.d/04-mysql_port.xml - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem - ../../../../testutil/pki/serverkey.pem:/etc/clickhouse-server/server.key - ../../../../testutil/pki/servercert.pem:/etc/clickhouse-server/server.crt ports: + - 3306:3306 - 8123:8123 - 8443:8443 - 9000:9000 diff --git a/plugins/inputs/clickhouse/dev/init_schema.sql b/plugins/inputs/clickhouse/dev/init_schema.sql new file mode 100644 index 0000000000000..85cd2e3a0d552 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/init_schema.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS default.test; +CREATE TABLE default.test( + Nom String, + Code Nullable(String) DEFAULT Null, + Cur Nullable(String) DEFAULT Null +) ENGINE=MergeTree() ORDER BY tuple(); diff --git a/plugins/inputs/clickhouse/dev/mysql_port.xml b/plugins/inputs/clickhouse/dev/mysql_port.xml new file mode 100644 index 0000000000000..275ec42bba2ae --- /dev/null +++ b/plugins/inputs/clickhouse/dev/mysql_port.xml @@ -0,0 +1,3 @@ +<yandex> + <mysql_port>3306</mysql_port> +</yandex> diff --git a/plugins/inputs/clickhouse/dev/test_dictionary.xml b/plugins/inputs/clickhouse/dev/test_dictionary.xml index 2f8f1ae5e26c5..b7472001452cb 100644 --- a/plugins/inputs/clickhouse/dev/test_dictionary.xml +++ b/plugins/inputs/clickhouse/dev/test_dictionary.xml @@ -1,11 +1,11 @@ - - Nom
- - - + Nom String + - - Code - String - - - Cur - String - + + + + Code + String + + + + Cur + String + @@ -40,8 +40,8 @@ LIFETIME(MIN 300 MAX 600); 3306 - wrong - wrong + default + 127.0.0.1 1 @@ -56,8 +56,7 @@ LIFETIME(MIN 300 MAX 600); - - - + + 300 diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index b418274f3b34a..806f84f61340d 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -10,6 +10,7 @@ import ( "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -31,10 +32,10 @@ type PubSub struct { Subscription string `toml:"subscription"` // Subscription ReceiveSettings - MaxExtension internal.Duration `toml:"max_extension"` - MaxOutstandingMessages int `toml:"max_outstanding_messages"` - MaxOutstandingBytes int `toml:"max_outstanding_bytes"` - MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` + MaxExtension config.Duration `toml:"max_extension"` + MaxOutstandingMessages int `toml:"max_outstanding_messages"` + MaxOutstandingBytes int `toml:"max_outstanding_bytes"` + MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` // Agent settings MaxMessageLen int `toml:"max_message_len"` @@ -67,7 +68,7 @@ func (ps *PubSub) SampleConfig() string { } // Gather does nothing for this service input. -func (ps *PubSub) Gather(acc telegraf.Accumulator) error { +func (ps *PubSub) Gather(_ telegraf.Accumulator) error { return nil } @@ -180,7 +181,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { if err != nil { return fmt.Errorf("unable to base64 decode message: %v", err) } - data = []byte(strData) + data = strData } else { data = msg.Data() } @@ -269,15 +270,15 @@ func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) { return client, nil } -func (ps *PubSub) getGCPSubscription(subId string) (subscription, error) { +func (ps *PubSub) getGCPSubscription(subID string) (subscription, error) { client, err := ps.getPubSubClient() if err != nil { return nil, err } - s := client.Subscription(subId) + s := client.Subscription(subID) s.ReceiveSettings = pubsub.ReceiveSettings{ NumGoroutines: ps.MaxReceiverGoRoutines, - MaxExtension: ps.MaxExtension.Duration, + MaxExtension: time.Duration(ps.MaxExtension), MaxOutstandingMessages: ps.MaxOutstandingMessages, MaxOutstandingBytes: ps.MaxOutstandingBytes, } @@ -312,8 +313,8 @@ const sampleConfig = ` ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Number of seconds to wait before attempting to restart the - ## PubSub subscription receiver after an unexpected error. + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. ## If the streaming pull for a PubSub Subscription fails (receiver), ## the agent attempts to restart receiving messages after this many seconds. # retry_delay_seconds = 5 @@ -362,7 +363,7 @@ const sampleConfig = ` ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 - ## Optional. If true, Telegraf will attempt to base64 decode the + ## Optional. 
If true, Telegraf will attempt to base64 decode the ## PubSub message data before parsing # base64_data = false ` diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index 2045cf4ccbc89..d07dfe34f2290 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -16,12 +16,12 @@ const ( // Test ingesting InfluxDB-format PubSub message func TestRunParse(t *testing.T) { - subId := "sub-run-parse" + subID := "sub-run-parse" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -31,7 +31,7 @@ func TestRunParse(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } @@ -60,12 +60,12 @@ func TestRunParse(t *testing.T) { // Test ingesting InfluxDB-format PubSub message func TestRunBase64(t *testing.T) { - subId := "sub-run-base64" + subID := "sub-run-base64" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -75,7 +75,7 @@ func TestRunBase64(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, Base64Data: true, } @@ -104,12 +104,12 @@ func TestRunBase64(t *testing.T) { } func TestRunInvalidMessages(t *testing.T) { - subId := "sub-invalid-messages" + subID := "sub-invalid-messages" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -119,7 +119,7 @@ func TestRunInvalidMessages(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } @@ -149,14 +149,14 @@ func TestRunInvalidMessages(t *testing.T) { } func TestRunOverlongMessages(t *testing.T) { - subId := "sub-message-too-long" + subID := "sub-message-too-long" acc := &testutil.Accumulator{} testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -166,7 +166,7 @@ func TestRunOverlongMessages(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, // Add MaxMessageLen Param MaxMessageLen: 1, @@ -196,25 +196,25 @@ func TestRunOverlongMessages(t *testing.T) { } func TestRunErrorInSubscriber(t *testing.T) { - subId := "sub-unexpected-error" + subID := "sub-unexpected-error" acc := &testutil.Accumulator{} testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } fakeErrStr := "a fake error" - sub.receiver = testMessagesError(sub, errors.New("a fake error")) + sub.receiver = testMessagesError(errors.New("a fake error")) ps := &PubSub{ Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + 
Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, RetryReceiveDelaySeconds: 1, } diff --git a/plugins/inputs/cloud_pubsub/subscription_stub.go b/plugins/inputs/cloud_pubsub/subscription_stub.go index e061728caf7fe..1e5bd009bc138 100644 --- a/plugins/inputs/cloud_pubsub/subscription_stub.go +++ b/plugins/inputs/cloud_pubsub/subscription_stub.go @@ -22,7 +22,7 @@ func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message)) type receiveFunc func(ctx context.Context, f func(context.Context, message)) error -func testMessagesError(s *stubSub, expectedErr error) receiveFunc { +func testMessagesError(expectedErr error) receiveFunc { return func(ctx context.Context, f func(context.Context, message)) error { return expectedErr } diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index b320daedbacc1..48329e1cd362e 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -5,14 +5,13 @@ import ( "crypto/subtle" "encoding/base64" "encoding/json" - "io/ioutil" - "net" + "io" "net/http" "sync" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -28,9 +27,9 @@ type PubSubPush struct { ServiceAddress string Token string Path string - ReadTimeout internal.Duration - WriteTimeout internal.Duration - MaxBodySize internal.Size + ReadTimeout config.Duration + WriteTimeout config.Duration + MaxBodySize config.Size AddMeta bool Log telegraf.Logger @@ -39,13 +38,12 @@ type PubSubPush struct { tlsint.ServerConfig parsers.Parser - listener net.Listener - server *http.Server - acc telegraf.TrackingAccumulator - ctx context.Context - cancel context.CancelFunc - wg *sync.WaitGroup - mu *sync.Mutex + server *http.Server + acc telegraf.TrackingAccumulator + ctx context.Context + cancel context.CancelFunc + wg *sync.WaitGroup + mu *sync.Mutex undelivered map[telegraf.TrackingID]chan bool sem chan struct{} @@ -131,15 +129,15 @@ func (p *PubSubPush) SetParser(parser parsers.Parser) { // Start starts the http listener service. 
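Before `Start` (shown next), a note on the `internal.Duration`/`internal.Size` to `config.Duration`/`config.Size` migration running through this file: the new types behave as plain integer types rather than struct wrappers, which is why field accesses like `p.ReadTimeout.Duration` become conversions. A minimal sketch of the pattern, assuming `config.Duration` is declared along the lines of `type Duration time.Duration` (inferred from the casts in this diff, not verified against the telegraf source):

```go
package main

import (
	"fmt"
	"time"
)

// Assumed shape of telegraf's config.Duration, inferred from the casts in
// the diff; the old internal.Duration was a struct with a .Duration field.
type Duration time.Duration

func main() {
	var readTimeout Duration // zero value, as when the option is unset in TOML

	// Defaulting now uses conversions instead of touching a struct field.
	if readTimeout < Duration(time.Second) {
		readTimeout = Duration(10 * time.Second)
	}

	// Converting back for use with net/http and friends.
	fmt.Println(time.Duration(readTimeout)) // prints "10s"
}
```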
func (p *PubSubPush) Start(acc telegraf.Accumulator) error { - if p.MaxBodySize.Size == 0 { - p.MaxBodySize.Size = defaultMaxBodySize + if p.MaxBodySize == 0 { + p.MaxBodySize = config.Size(defaultMaxBodySize) } - if p.ReadTimeout.Duration < time.Second { - p.ReadTimeout.Duration = time.Second * 10 + if p.ReadTimeout < config.Duration(time.Second) { + p.ReadTimeout = config.Duration(time.Second * 10) } - if p.WriteTimeout.Duration < time.Second { - p.WriteTimeout.Duration = time.Second * 10 + if p.WriteTimeout < config.Duration(time.Second) { + p.WriteTimeout = config.Duration(time.Second * 10) } tlsConf, err := p.ServerConfig.TLSConfig() @@ -149,8 +147,8 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { p.server = &http.Server{ Addr: p.ServiceAddress, - Handler: http.TimeoutHandler(p, p.WriteTimeout.Duration, "timed out processing metric"), - ReadTimeout: p.ReadTimeout.Duration, + Handler: http.TimeoutHandler(p, time.Duration(p.WriteTimeout), "timed out processing metric"), + ReadTimeout: time.Duration(p.ReadTimeout), TLSConfig: tlsConf, } @@ -171,9 +169,13 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { go func() { defer p.wg.Done() if tlsConf != nil { - p.server.ListenAndServeTLS("", "") + if err := p.server.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { + p.Log.Errorf("listening and serving TLS failed: %v", err) + } } else { - p.server.ListenAndServe() + if err := p.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + p.Log.Errorf("listening and serving failed: %v", err) + } } }() @@ -183,6 +185,7 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { // Stop cleans up all resources func (p *PubSubPush) Stop() { p.cancel() + //nolint:errcheck,revive // we cannot do anything if the shutdown fails p.server.Shutdown(p.ctx) p.wg.Wait() } @@ -208,7 +211,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { } // Check that the content length is not too large for us to handle.
- if req.ContentLength > p.MaxBodySize.Size { + if req.ContentLength > int64(p.MaxBodySize) { res.WriteHeader(http.StatusRequestEntityTooLarge) return } @@ -218,8 +221,8 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { return } - body := http.MaxBytesReader(res, req.Body, p.MaxBodySize.Size) - bytes, err := ioutil.ReadAll(body) + body := http.MaxBytesReader(res, req.Body, int64(p.MaxBodySize)) + bytes, err := io.ReadAll(body) if err != nil { res.WriteHeader(http.StatusRequestEntityTooLarge) return diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index ae7601b20cccc..0523375229429 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -119,15 +119,13 @@ func TestServeHTTP(t *testing.T) { rr := httptest.NewRecorder() pubPush := &PubSubPush{ - Log: testutil.Logger{}, - Path: "/", - MaxBodySize: internal.Size{ - Size: test.maxsize, - }, + Log: testutil.Logger{}, + Path: "/", + MaxBodySize: config.Size(test.maxsize), sem: make(chan struct{}, 1), undelivered: make(map[telegraf.TrackingID]chan bool), mu: &sync.Mutex{}, - WriteTimeout: internal.Duration{Duration: time.Second * 1}, + WriteTimeout: config.Duration(time.Second * 1), } pubPush.ctx, pubPush.cancel = context.WithCancel(context.Background()) @@ -144,7 +142,7 @@ func TestServeHTTP(t *testing.T) { pubPush.SetParser(p) dst := make(chan telegraf.Metric, 1) - ro := models.NewRunningOutput("test", &testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) + ro := models.NewRunningOutput(&testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) pubPush.acc = agent.NewAccumulator(&testMetricMaker{}, dst).WithTracking(1) wg.Add(1) @@ -154,15 +152,16 @@ func TestServeHTTP(t *testing.T) { }() wg.Add(1) - go func(status int, d chan telegraf.Metric) { + go func(d chan telegraf.Metric) { defer wg.Done() for m := range d { ro.AddMetric(m) + //nolint:errcheck,revive // test will fail anyway if the write fails ro.Write() } - }(test.status, dst) + }(dst) - ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration) + ctx, cancel := context.WithTimeout(req.Context(), time.Duration(pubPush.WriteTimeout)) req = req.WithContext(ctx) pubPush.ServeHTTP(rr, req) @@ -218,7 +217,7 @@ func (*testOutput) SampleConfig() string { return "" } -func (t *testOutput) Write(metrics []telegraf.Metric) error { +func (t *testOutput) Write(_ []telegraf.Metric) error { if t.failWrite { return fmt.Errorf("failed write") } diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index bc7b9b50c5d80..97592f5197ab7 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -16,24 +16,28 @@ API endpoint. In the following order the plugin will attempt to authenticate. 
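The credential resolution order documented in the configuration below mirrors the AWS SDK's default provider chain, extended by telegraf's explicit options. As a rough sketch of how that default chain behaves, using aws-sdk-go-v2 (the SDK this diff migrates the plugin to) directly rather than the plugin's own `CredentialConfig` helper:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// LoadDefaultConfig walks the standard provider chain: environment
	// variables, the shared credentials/config files, and finally
	// container or EC2 instance role credentials.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithRegion("us-east-1"))
	if err != nil {
		panic(err)
	}

	// Retrieve reports which provider ultimately supplied credentials.
	creds, err := cfg.Credentials.Retrieve(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials resolved via:", creds.Source)
}
```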
### Configuration: ```toml +# Pull Metric Statistics from Amazon CloudWatch [[inputs.cloudwatch]] ## Amazon Region region = "us-east-1" ## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile - # access_key = "" - # secret_key = "" - # token = "" - # role_arn = "" - # profile = "" - # shared_credential_file = "" + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #web_identity_token_file = "" + #role_session_name = "" + #profile = "" + #shared_credential_file = "" ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the @@ -41,6 +45,9 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## ex: endpoint_url = "http://localhost:8000" # endpoint_url = "" + ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set) + # http_proxy_url = "http://localhost:8888" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -68,8 +75,10 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Configure the TTL for the internal cache of metrics. # cache_ttl = "1h" - ## Metric Statistic Namespace (required) - namespace = "AWS/ELB" + ## Metric Statistic Namespaces (required) + namespaces = ["AWS/ELB"] + # A single metric statistic namespace that will be appended to namespaces on startup + # namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -98,6 +107,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. + # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
# [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index d1f5661a03eba..17305f31c93a6 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -1,6 +1,7 @@ package cloudwatch import ( + "context" "fmt" "net" "net/http" @@ -9,35 +10,41 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/aws" + cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" - "github.com/influxdata/telegraf/metric" + internalMetric "github.com/influxdata/telegraf/metric" + internalProxy "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + StatisticAverage = "Average" + StatisticMaximum = "Maximum" + StatisticMinimum = "Minimum" + StatisticSum = "Sum" + StatisticSampleCount = "SampleCount" +) + // CloudWatch contains the configuration and cache for the cloudwatch plugin. type CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - CredentialPath string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` StatisticExclude []string `toml:"statistic_exclude"` StatisticInclude []string `toml:"statistic_include"` Timeout config.Duration `toml:"timeout"` + internalProxy.HTTPProxy + Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` Namespace string `toml:"namespace"` + Namespaces []string `toml:"namespaces"` Metrics []*Metric `toml:"metrics"` CacheTTL config.Duration `toml:"cache_ttl"` RateLimit int `toml:"ratelimit"` @@ -51,6 +58,8 @@ type CloudWatch struct { queryDimensions map[string]*map[string]string windowStart time.Time windowEnd time.Time + + internalaws.CredentialConfig } // Metric defines a simplified Cloudwatch metric. @@ -63,8 +72,9 @@ type Metric struct { // Dimension defines a simplified Cloudwatch dimension (provides metric filtering). type Dimension struct { - Name string `toml:"name"` - Value string `toml:"value"` + Name string `toml:"name"` + Value string `toml:"value"` + valueMatcher filter.Filter } // metricCache caches metrics, their filters, and generated queries. @@ -72,12 +82,12 @@ type metricCache struct { ttl time.Duration built time.Time metrics []filteredMetric - queries []*cloudwatch.MetricDataQuery + queries map[string][]types.MetricDataQuery } type cloudwatchClient interface { - ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) - GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) + ListMetrics(context.Context, *cwClient.ListMetricsInput, ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) + GetMetricData(context.Context, *cwClient.GetMetricDataInput, ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) } // SampleConfig returns the default configuration of the Cloudwatch input plugin. 
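Worth highlighting from the hunk above: `Dimension` gains a `valueMatcher filter.Filter` field, so configured dimension values are now compiled into glob filters (the initialization appears further down via `filter.NewIncludeExcludeFilter`) and patterns like `p-*` match whole families of dimension values. A self-contained illustration of the idea, using the standard library's `path.Match` purely as a stand-in glob engine rather than telegraf's `filter` package:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// "p-*" is the example pattern from the README above.
	pattern := "p-*"
	for _, value := range []string{"p-example", "p-internal", "q-example"} {
		// path.Match supports *, ? and [...] classes, much like the
		// glob syntax the plugin accepts for dimension values.
		ok, err := path.Match(pattern, value)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-10s matches %q: %v\n", value, pattern, ok)
	}
}
```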
@@ -88,16 +98,19 @@ func (c *CloudWatch) SampleConfig() string { ## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile # access_key = "" # secret_key = "" # token = "" # role_arn = "" + # web_identity_token_file = "" + # role_session_name = "" # profile = "" # shared_credential_file = "" @@ -107,6 +120,9 @@ func (c *CloudWatch) SampleConfig() string { ## ex: endpoint_url = "http://localhost:8000" # endpoint_url = "" + ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set) + # http_proxy_url = "http://localhost:8888" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -134,8 +150,10 @@ func (c *CloudWatch) SampleConfig() string { ## Configure the TTL for the internal cache of metrics. # cache_ttl = "1h" - ## Metric Statistic Namespace (required) - namespace = "AWS/ELB" + ## Metric Statistic Namespaces (required) + namespaces = ["AWS/ELB"] + # A single metric statistic namespace that will be appended to namespaces on startup + # namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -164,6 +182,7 @@ func (c *CloudWatch) SampleConfig() string { # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. + # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" @@ -175,35 +194,37 @@ func (c *CloudWatch) Description() string { return "Pull Metric Statistics from Amazon CloudWatch" } -// Gather takes in an accumulator and adds the metrics that the Input -// gathers. This is called every "interval". -func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { - if c.statFilter == nil { - var err error - // Set config level filter (won't change throughout life of plugin). - c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) - if err != nil { - return err - } +func (c *CloudWatch) Init() error { + if len(c.Namespace) != 0 { + c.Namespaces = append(c.Namespaces, c.Namespace) } - if c.client == nil { - c.initializeCloudWatch() + err := c.initializeCloudWatch() + if err != nil { + return err } - filteredMetrics, err := getFilteredMetrics(c) + // Set config level filter (won't change throughout life of plugin). + c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) if err != nil { return err } - c.updateWindow(time.Now()) + return nil +} - // Get all of the possible queries so we can send groups of 100.
- queries, err := c.getDataQueries(filteredMetrics) +// Gather takes in an accumulator and adds the metrics that the Input +// gathers. This is called every "interval". +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { + filteredMetrics, err := getFilteredMetrics(c) if err != nil { return err } + c.updateWindow(time.Now()) + + // Get all of the possible queries so we can send groups of 100. + queries := c.getDataQueries(filteredMetrics) if len(queries) == 0 { return nil } @@ -216,32 +237,34 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { wg := sync.WaitGroup{} rLock := sync.Mutex{} - results := []*cloudwatch.MetricDataResult{} + results := map[string][]types.MetricDataResult{} - // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. - batchSize := 500 - var batches [][]*cloudwatch.MetricDataQuery + for namespace, namespacedQueries := range queries { + // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. + batchSize := 500 + var batches [][]types.MetricDataQuery - for batchSize < len(queries) { - queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize]) - } - batches = append(batches, queries) - - for i := range batches { - wg.Add(1) - <-lmtr.C - go func(inm []*cloudwatch.MetricDataQuery) { - defer wg.Done() - result, err := c.gatherMetrics(c.getDataInputs(inm)) - if err != nil { - acc.AddError(err) - return - } + for batchSize < len(namespacedQueries) { + namespacedQueries, batches = namespacedQueries[batchSize:], append(batches, namespacedQueries[0:batchSize:batchSize]) + } + batches = append(batches, namespacedQueries) + + for i := range batches { + wg.Add(1) + <-lmtr.C + go func(n string, inm []types.MetricDataQuery) { + defer wg.Done() + result, err := c.gatherMetrics(c.getDataInputs(inm)) + if err != nil { + acc.AddError(err) + return + } - rLock.Lock() - results = append(results, result...) - rLock.Unlock() - }(batches[i]) + rLock.Lock() + results[n] = append(results[n], result...) + rLock.Unlock() + }(namespace, batches[i]) + } } wg.Wait() @@ -249,24 +272,24 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return c.aggregateMetrics(acc, results) } -func (c *CloudWatch) initializeCloudWatch() { - credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.CredentialPath, - Token: c.Token, - EndpointURL: c.EndpointURL, +func (c *CloudWatch) initializeCloudWatch() error { + proxy, err := c.HTTPProxy.Proxy() + if err != nil { + return err + } + + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err } - configProvider := credentialConfig.Credentials() + c.client = cwClient.NewFromConfig(cfg, func(options *cwClient.Options) { + // Disable logging + options.ClientLogMode = 0 - cfg := &aws.Config{ - HTTPClient: &http.Client{ + options.HTTPClient = &http.Client{ // use values from DefaultTransport Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: proxy, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, @@ -278,15 +301,26 @@ func (c *CloudWatch) initializeCloudWatch() { ExpectContinueTimeout: 1 * time.Second, }, Timeout: time.Duration(c.Timeout), - }, + 
+ for _, m := range c.Metrics { + for _, dimension := range m.Dimensions { + matcher, err := filter.NewIncludeExcludeFilter([]string{dimension.Value}, nil) + if err != nil { + return err + } + + dimension.valueMatcher = matcher + } } - loglevel := aws.LogOff - c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) + return nil } type filteredMetric struct { - metrics []*cloudwatch.Metric + metrics []types.Metric statFilter filter.Filter } @@ -301,21 +335,23 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { // check for provided metric filter if c.Metrics != nil { for _, m := range c.Metrics { - metrics := []*cloudwatch.Metric{} + metrics := []types.Metric{} if !hasWildcard(m.Dimensions) { - dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) + dimensions := make([]types.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { - dimensions[k] = &cloudwatch.Dimension{ + dimensions[k] = types.Dimension{ Name: aws.String(d.Name), Value: aws.String(d.Value), } } for _, name := range m.MetricNames { - metrics = append(metrics, &cloudwatch.Metric{ - Namespace: aws.String(c.Namespace), - MetricName: aws.String(name), - Dimensions: dimensions, - }) + for _, namespace := range c.Namespaces { + metrics = append(metrics, types.Metric{ + Namespace: aws.String(namespace), + MetricName: aws.String(name), + Dimensions: dimensions, + }) + } } } else { allMetrics, err := c.fetchNamespaceMetrics() @@ -325,11 +361,13 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { for _, name := range m.MetricNames { for _, metric := range allMetrics { if isSelected(name, metric, m.Dimensions) { - metrics = append(metrics, &cloudwatch.Metric{ - Namespace: aws.String(c.Namespace), - MetricName: aws.String(name), - Dimensions: metric.Dimensions, - }) + for _, namespace := range c.Namespaces { + metrics = append(metrics, types.Metric{ + Namespace: aws.String(namespace), + MetricName: aws.String(name), + Dimensions: metric.Dimensions, + }) + } } } } @@ -373,40 +411,36 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { } // fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace. -func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { - metrics := []*cloudwatch.Metric{} +func (c *CloudWatch) fetchNamespaceMetrics() ([]types.Metric, error) { + metrics := []types.Metric{} var token *string - var params *cloudwatch.ListMetricsInput - var recentlyActive *string = nil - - switch c.RecentlyActive { - case "PT3H": - recentlyActive = &c.RecentlyActive - default: - recentlyActive = nil + + params := &cwClient.ListMetricsInput{ + Dimensions: []types.DimensionFilter{}, + NextToken: token, + MetricName: nil, } - params = &cloudwatch.ListMetricsInput{ - Namespace: aws.String(c.Namespace), - Dimensions: []*cloudwatch.DimensionFilter{}, - NextToken: token, - MetricName: nil, - RecentlyActive: recentlyActive, + if c.RecentlyActive == "PT3H" { + params.RecentlyActive = types.RecentlyActivePt3h } - for { - resp, err := c.client.ListMetrics(params) - if err != nil { - return nil, err - } - metrics = append(metrics, resp.Metrics...) 
- if resp.NextToken == nil { - break - } + for _, namespace := range c.Namespaces { + params.Namespace = aws.String(namespace) + for { + resp, err := c.client.ListMetrics(context.Background(), params) + if err != nil { + return nil, fmt.Errorf("failed to list metrics with params per namespace: %v", err) + } - params.NextToken = resp.NextToken - } + metrics = append(metrics, resp.Metrics...) + if resp.NextToken == nil { + break + } + params.NextToken = resp.NextToken + } + } return metrics, nil } @@ -425,75 +459,75 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) { } // getDataQueries gets all of the possible queries so we can maximize the request payload. -func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) { +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string][]types.MetricDataQuery { if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { - return c.metricCache.queries, nil + return c.metricCache.queries } c.queryDimensions = map[string]*map[string]string{} - dataQueries := []*cloudwatch.MetricDataQuery{} + dataQueries := map[string][]types.MetricDataQuery{} for i, filtered := range filteredMetrics { for j, metric := range filtered.metrics { id := strconv.Itoa(j) + "_" + strconv.Itoa(i) dimension := ctod(metric.Dimensions) if filtered.statFilter.Match("average") { c.queryDimensions["average_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("average_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_average")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticAverage), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticAverage), }, }) } if filtered.statFilter.Match("maximum") { c.queryDimensions["maximum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("maximum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticMaximum), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticMaximum), }, }) } if filtered.statFilter.Match("minimum") { c.queryDimensions["minimum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("minimum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticMinimum), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticMinimum), }, }) } if filtered.statFilter.Match("sum") { c.queryDimensions["sum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + 
dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("sum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sum")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticSum), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticSum), }, }) } if filtered.statFilter.Match("sample_count") { c.queryDimensions["sample_count_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("sample_count_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticSampleCount), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticSampleCount), }, }) } @@ -502,7 +536,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw if len(dataQueries) == 0 { c.Log.Debug("no metrics found to collect") - return nil, nil + return nil } if c.metricCache == nil { @@ -515,17 +549,17 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw c.metricCache.queries = dataQueries } - return dataQueries, nil + return dataQueries } // gatherMetrics gets metric data from Cloudwatch. func (c *CloudWatch) gatherMetrics( - params *cloudwatch.GetMetricDataInput, -) ([]*cloudwatch.MetricDataResult, error) { - results := []*cloudwatch.MetricDataResult{} + params *cwClient.GetMetricDataInput, +) ([]types.MetricDataResult, error) { + results := []types.MetricDataResult{} for { - resp, err := c.client.GetMetricData(params) + resp, err := c.client.GetMetricData(context.Background(), params) if err != nil { return nil, fmt.Errorf("failed to get metric data: %v", err) } @@ -542,23 +576,28 @@ func (c *CloudWatch) gatherMetrics( func (c *CloudWatch) aggregateMetrics( acc telegraf.Accumulator, - metricDataResults []*cloudwatch.MetricDataResult, + metricDataResults map[string][]types.MetricDataResult, ) error { var ( - grouper = metric.NewSeriesGrouper() - namespace = sanitizeMeasurement(c.Namespace) + grouper = internalMetric.NewSeriesGrouper() ) - for _, result := range metricDataResults { - tags := map[string]string{} + for namespace, results := range metricDataResults { + namespace = sanitizeMeasurement(namespace) - if dimensions, ok := c.queryDimensions[*result.Id]; ok { - tags = *dimensions - } - tags["region"] = c.Region + for _, result := range results { + tags := map[string]string{} + + if dimensions, ok := c.queryDimensions[*result.Id]; ok { + tags = *dimensions + } + tags["region"] = c.Region - for i := range result.Values { - grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]) + for i := range result.Values { + if err := grouper.Add(namespace, tags, result.Timestamps[i], *result.Label, result.Values[i]); err != nil { + acc.AddError(err) + } + } } } @@ -597,13 +636,8 @@ func snakeCase(s string) string { return s } -type dimension struct { - name string - value string -} - // ctod converts cloudwatch dimensions to regular dimensions. 
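One pattern worth calling out before `ctod`: both `fetchNamespaceMetrics` (`ListMetrics`) and `gatherMetrics` (`GetMetricData`) page their results with a `NextToken`, accumulating until the token comes back nil. A dependency-free toy version of that loop, with hypothetical `page`/`fetch` types standing in for the SDK:

```go
package main

import "fmt"

// page mimics the shape of a paginated AWS response: a chunk of results
// plus an optional continuation token (illustrative types only).
type page struct {
	Results   []string
	NextToken *string
}

// fetch stands in for a call like GetMetricData: one page per token.
func fetch(token *string) page {
	if token == nil {
		next := "page-2"
		return page{Results: []string{"a", "b"}, NextToken: &next}
	}
	return page{Results: []string{"c"}} // final page: no NextToken
}

func main() {
	// Accumulate pages until NextToken is nil, as the plugin's loops do.
	var results []string
	var token *string
	for {
		p := fetch(token)
		results = append(results, p.Results...)
		if p.NextToken == nil {
			break
		}
		token = p.NextToken
	}
	fmt.Println(results) // [a b c]
}
```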
-func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { +func ctod(cDimensions []types.Dimension) *map[string]string { dimensions := map[string]string{} for i := range cDimensions { dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value @@ -611,8 +645,8 @@ func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { return &dimensions } -func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput { - return &cloudwatch.GetMetricDataInput{ +func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cwClient.GetMetricDataInput { + return &cwClient.GetMetricDataInput{ StartTime: aws.Time(c.windowStart), EndTime: aws.Time(c.windowEnd), MetricDataQueries: dataQueries, @@ -626,14 +660,14 @@ func (f *metricCache) isValid() bool { func hasWildcard(dimensions []*Dimension) bool { for _, d := range dimensions { - if d.Value == "" || d.Value == "*" { + if d.Value == "" || strings.ContainsAny(d.Value, "*?[") { return true } } return false } -func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool { +func isSelected(name string, metric types.Metric, dimensions []*Dimension) bool { if name != *metric.MetricName { return false } @@ -644,7 +678,7 @@ func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) selected := false for _, d2 := range metric.Dimensions { if d.Name == *d2.Name { - if d.Value == "" || d.Value == "*" || d.Value == *d2.Value { + if d.Value == "" || d.valueMatcher.Match(*d2.Value) { selected = true } } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 2983773ad1bb5..9672ff88a2c1b 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -1,28 +1,32 @@ package cloudwatch import ( + "context" + "net/http" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/stretchr/testify/assert" + "github.com/aws/aws-sdk-go-v2/aws" + cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/testutil" ) type mockGatherCloudWatchClient struct{} -func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - return &cloudwatch.ListMetricsOutput{ - Metrics: []*cloudwatch.Metric{ +func (m *mockGatherCloudWatchClient) ListMetrics(_ context.Context, params *cwClient.ListMetricsInput, _ ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) { + return &cwClient.ListMetricsOutput{ + Metrics: []types.Metric{ { Namespace: params.Namespace, MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), @@ -33,78 +37,70 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsI }, nil } -func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { - return &cloudwatch.GetMetricDataOutput{ - MetricDataResults: []*cloudwatch.MetricDataResult{ +func (m *mockGatherCloudWatchClient) GetMetricData(_ 
context.Context, params *cwClient.GetMetricDataInput, _ ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) { + return &cwClient.GetMetricDataOutput{ + MetricDataResults: []types.MetricDataResult{ { Id: aws.String("minimum_0_0"), Label: aws.String("latency_minimum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.1), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.1}, }, { Id: aws.String("maximum_0_0"), Label: aws.String("latency_maximum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.3), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.3}, }, { Id: aws.String("average_0_0"), Label: aws.String("latency_average"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.2), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.2}, }, { Id: aws.String("sum_0_0"), Label: aws.String("latency_sum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(123), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{123}, }, { Id: aws.String("sample_count_0_0"), Label: aws.String("latency_sample_count"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(100), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{100}, }, }, }, nil } func TestSnakeCase(t *testing.T) { - assert.Equal(t, "cluster_name", snakeCase("Cluster Name")) - assert.Equal(t, "broker_id", snakeCase("Broker ID")) + require.Equal(t, "cluster_name", snakeCase("Cluster Name")) + require.Equal(t, "broker_id", snakeCase("Broker ID")) } func TestGather(t *testing.T) { duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Region: "us-east-1", + CredentialConfig: internalaws.CredentialConfig{ + Region: "us-east-1", + }, Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, @@ -112,9 +108,10 @@ func TestGather(t *testing.T) { } var acc testutil.Accumulator - c.client = &mockGatherCloudWatchClient{} - assert.NoError(t, acc.GatherError(c.Gather)) + require.NoError(t, c.Init()) + c.client = &mockGatherCloudWatchClient{} + require.NoError(t, acc.GatherError(c.Gather)) fields := map[string]interface{}{} fields["latency_minimum"] = 0.1 @@ -127,14 +124,34 @@ func TestGather(t *testing.T) { tags["region"] = "us-east-1" tags["load_balancer_name"] = "p-example" - assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) + require.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) } +func TestGather_MultipleNamespaces(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := config.Duration(duration) + c := &CloudWatch{ + Namespaces: []string{"AWS/ELB", "AWS/EC2"}, + Delay: internalDuration, + Period: internalDuration, + RateLimit: 200, + } + + var acc testutil.Accumulator + + require.NoError(t, c.Init()) + c.client = &mockGatherCloudWatchClient{} + require.NoError(t, acc.GatherError(c.Gather)) + + require.True(t, 
acc.HasMeasurement("cloudwatch_aws_elb")) + require.True(t, acc.HasMeasurement("cloudwatch_aws_ec2")) +} + type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - metrics := []*cloudwatch.Metric{} +func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ context.Context, params *cwClient.ListMetricsInput, _ ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) { + metrics := []types.Metric{} // 4 metrics are available metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} // for 3 ELBs @@ -144,10 +161,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM for _, m := range metricNames { for _, lb := range loadBalancers { // For each metric/ELB pair, we get an aggregate value across all AZs. - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -156,10 +173,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM }) for _, az := range availabilityZones { // We get a metric for each metric/ELB/AZ triplet. - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -174,13 +191,13 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM } } - result := &cloudwatch.ListMetricsOutput{ + result := &cwClient.ListMetricsOutput{ Metrics: metrics, } return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ context.Context, params *cwClient.GetMetricDataInput, _ ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) { return nil, nil } @@ -188,7 +205,9 @@ func TestSelectMetrics(t *testing.T) { duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Region: "us-east-1", + CredentialConfig: internalaws.CredentialConfig{ + Region: "us-east-1", + }, Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, @@ -199,96 +218,101 @@ func TestSelectMetrics(t *testing.T) { Dimensions: []*Dimension{ { Name: "LoadBalancerName", - Value: "*", + Value: "lb*", }, { Name: "AvailabilityZone", - Value: "*", + Value: "us-east*", }, }, }, }, } + require.NoError(t, c.Init()) c.client = &mockSelectMetricsCloudWatchClient{} filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 // AZs. We should get 12 metrics. 
- assert.Equal(t, 12, len(filtered[0].metrics)) - assert.NoError(t, err) + require.Equal(t, 12, len(filtered[0].metrics)) + require.NoError(t, err) } func TestGenerateStatisticsInputParams(t *testing.T) { - d := &cloudwatch.Dimension{ + d := types.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } - m := &cloudwatch.Metric{ + namespace := "AWS/ELB" + m := types.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{d}, + Dimensions: []types.Dimension{d}, + Namespace: aws.String(namespace), } duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Namespace: "AWS/ELB", - Delay: internalDuration, - Period: internalDuration, + Namespaces: []string{namespace}, + Delay: internalDuration, + Period: internalDuration, } - c.initializeCloudWatch() + require.NoError(t, c.initializeCloudWatch()) now := time.Now() c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) - queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) - params := c.getDataInputs(queries) + queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries[namespace]) - assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) require.Len(t, params.MetricDataQueries, 5) - assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) - assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) + require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + require.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { - d := &cloudwatch.Dimension{ + d := types.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } - m := &cloudwatch.Metric{ + namespace := "AWS/ELB" + m := types.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{d}, + Dimensions: []types.Dimension{d}, + Namespace: aws.String(namespace), } duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Namespace: "AWS/ELB", - Delay: internalDuration, - Period: internalDuration, + Namespaces: []string{namespace}, + Delay: internalDuration, + Period: internalDuration, } - c.initializeCloudWatch() + require.NoError(t, c.initializeCloudWatch()) now := time.Now() c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) - queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) - params := c.getDataInputs(queries) + queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries[namespace]) - assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.StartTime, 
now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) require.Len(t, params.MetricDataQueries, 2) - assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) - assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) + require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + require.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestMetricsCacheTimeout(t *testing.T) { @@ -298,9 +322,9 @@ func TestMetricsCacheTimeout(t *testing.T) { ttl: time.Minute, } - assert.True(t, cache.isValid()) + require.True(t, cache.isValid()) cache.built = time.Now().Add(-time.Minute) - assert.False(t, cache.isValid()) + require.False(t, cache.isValid()) } func TestUpdateWindow(t *testing.T) { @@ -315,21 +339,41 @@ func TestUpdateWindow(t *testing.T) { now := time.Now() - assert.True(t, c.windowEnd.IsZero()) - assert.True(t, c.windowStart.IsZero()) + require.True(t, c.windowEnd.IsZero()) + require.True(t, c.windowStart.IsZero()) c.updateWindow(now) newStartTime := c.windowEnd // initial window just has a single period - assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period))) + require.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period))) now = time.Now() c.updateWindow(now) // subsequent window uses previous end time as start time - assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, c.windowStart, newStartTime) + require.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, c.windowStart, newStartTime) +} + +func TestProxyFunction(t *testing.T) { + c := &CloudWatch{ + HTTPProxy: proxy.HTTPProxy{HTTPProxyURL: "http://www.penguins.com"}, + } + + proxyFunction, err := c.HTTPProxy.Proxy() + require.NoError(t, err) + + proxyResult, err := proxyFunction(&http.Request{}) + require.NoError(t, err) + require.Equal(t, "www.penguins.com", proxyResult.Host) +} + +func TestCombineNamespaces(t *testing.T) { + c := &CloudWatch{Namespace: "AWS/ELB", Namespaces: []string{"AWS/EC2", "AWS/Billing"}} + + require.NoError(t, c.Init()) + require.Equal(t, []string{"AWS/EC2", "AWS/Billing", "AWS/ELB"}, c.Namespaces) } diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index bf6c021c80f4a..d644f7c188fc5 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -1,17 +1,18 @@ +//go:build linux // +build linux package conntrack import ( "fmt" - "io/ioutil" "os" "strconv" "strings" + "path/filepath" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "path/filepath" ) type Conntrack struct { @@ -90,7 +91,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { continue } - contents, err := ioutil.ReadFile(fName) + contents, err := os.ReadFile(fName) if err != nil { acc.AddError(fmt.Errorf("E! 
failed to read file '%s': %v", fName, err)) continue diff --git a/plugins/inputs/conntrack/conntrack_notlinux.go b/plugins/inputs/conntrack/conntrack_notlinux.go index 11948731bb88d..6ad8e4a10e3c5 100644 --- a/plugins/inputs/conntrack/conntrack_notlinux.go +++ b/plugins/inputs/conntrack/conntrack_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package conntrack diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index 9c144afe84e53..cb33caec2e330 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package conntrack import ( - "io/ioutil" "os" "path" "strconv" @@ -11,7 +11,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func restoreDflts(savedFiles, savedDirs []string) { @@ -28,18 +28,18 @@ func TestNoFilesFound(t *testing.T) { acc := &testutil.Accumulator{} err := c.Gather(acc) - assert.EqualError(t, err, "Conntrack input failed to collect metrics. "+ + require.EqualError(t, err, "Conntrack input failed to collect metrics. "+ "Is the conntrack kernel module loaded?") } func TestDefaultsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") - assert.NoError(t, err) + tmpdir, err := os.MkdirTemp("", "tmp1") + require.NoError(t, err) defer os.Remove(tmpdir) - tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count") - assert.NoError(t, err) + tmpFile, err := os.CreateTemp(tmpdir, "ip_conntrack_count") + require.NoError(t, err) defer os.Remove(tmpFile.Name()) dfltDirs = []string{tmpdir} @@ -47,24 +47,25 @@ func TestDefaultsUsed(t *testing.T) { dfltFiles = []string{fname} count := 1234321 - ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660) + require.NoError(t, os.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} - c.Gather(acc) + require.NoError(t, c.Gather(acc)) acc.AssertContainsFields(t, inputName, map[string]interface{}{ fname: float64(count)}) } func TestConfigsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") - assert.NoError(t, err) + tmpdir, err := os.MkdirTemp("", "tmp1") + require.NoError(t, err) defer os.Remove(tmpdir) - cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count") - maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max") - assert.NoError(t, err) + cntFile, err := os.CreateTemp(tmpdir, "nf_conntrack_count") + require.NoError(t, err) + maxFile, err := os.CreateTemp(tmpdir, "nf_conntrack_max") + require.NoError(t, err) defer os.Remove(cntFile.Name()) defer os.Remove(maxFile.Name()) @@ -75,12 +76,12 @@ func TestConfigsUsed(t *testing.T) { count := 1234321 max := 9999999 - ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660) - ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660) + require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} - c.Gather(acc) + require.NoError(t, c.Gather(acc)) fix := func(s string) string { return strings.Replace(s, "nf_", "ip_", 1) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 659b87c3a1fb6..1acdaea4ac76e 100644 --- a/plugins/inputs/couchbase/README.md +++ 
b/plugins/inputs/couchbase/README.md @@ -1,4 +1,6 @@ # Couchbase Input Plugin +Couchbase is a distributed NoSQL database. +This plugin collects per-node metrics, as well as detailed per-bucket metrics, from a given Couchbase server. ## Configuration: @@ -15,6 +17,17 @@ ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] + + ## Filter bucket fields to include. Only the fields listed here are collected. + # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ``` ## Measurements: @@ -35,7 +48,7 @@ Tags: - cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`) - bucket: the name of the couchbase bucket, e.g., `blastro-df` -Fields: +Default bucket fields: - quota_percent_used (unit: percent, example: 68.85424936294555) - ops_per_sec (unit: count, example: 5686.789686789687) - disk_fetches (unit: count, example: 0.0) @@ -44,6 +57,223 @@ - data_used (unit: bytes, example: 212179309111.0) - mem_used (unit: bytes, example: 202156957464.0) +Additional fields that can be configured with the `bucket_stats_included` option: +- couch_total_disk_size +- couch_docs_fragmentation +- couch_views_fragmentation +- hit_ratio +- ep_cache_miss_rate +- ep_resident_items_rate +- vb_avg_active_queue_age +- vb_avg_replica_queue_age +- vb_avg_pending_queue_age +- vb_avg_total_queue_age +- vb_active_resident_items_ratio +- vb_replica_resident_items_ratio +- vb_pending_resident_items_ratio +- avg_disk_update_time +- avg_disk_commit_time +- avg_bg_wait_time +- avg_active_timestamp_drift +- avg_replica_timestamp_drift +- ep_dcp_views+indexes_count +- ep_dcp_views+indexes_items_remaining +- ep_dcp_views+indexes_producer_count +- ep_dcp_views+indexes_total_backlog_size +- ep_dcp_views+indexes_items_sent +- ep_dcp_views+indexes_total_bytes +- ep_dcp_views+indexes_backoff +- bg_wait_count +- bg_wait_total +- bytes_read +- bytes_written +- cas_badval +- cas_hits +- cas_misses +- cmd_get +- cmd_lookup +- cmd_set +- couch_docs_actual_disk_size +- couch_docs_data_size +- couch_docs_disk_size +- couch_spatial_data_size +- couch_spatial_disk_size +- couch_spatial_ops +- couch_views_actual_disk_size +- couch_views_data_size +- couch_views_disk_size +- couch_views_ops +- curr_connections +- curr_items +- curr_items_tot +- decr_hits +- decr_misses +- delete_hits +- delete_misses +- disk_commit_count +- disk_commit_total +- disk_update_count +- disk_update_total +- disk_write_queue +- ep_active_ahead_exceptions +- ep_active_hlc_drift +- ep_active_hlc_drift_count +- ep_bg_fetched +- ep_clock_cas_drift_threshold_exceeded +- ep_data_read_failed +- ep_data_write_failed +- ep_dcp_2i_backoff +- ep_dcp_2i_count +- ep_dcp_2i_items_remaining +- ep_dcp_2i_items_sent +- ep_dcp_2i_producer_count +- ep_dcp_2i_total_backlog_size +- ep_dcp_2i_total_bytes +- ep_dcp_cbas_backoff +- ep_dcp_cbas_count +- ep_dcp_cbas_items_remaining +- ep_dcp_cbas_items_sent +- ep_dcp_cbas_producer_count +- ep_dcp_cbas_total_backlog_size +- ep_dcp_cbas_total_bytes +- ep_dcp_eventing_backoff +- ep_dcp_eventing_count +- ep_dcp_eventing_items_remaining +- ep_dcp_eventing_items_sent +- 
ep_dcp_eventing_producer_count +- ep_dcp_eventing_total_backlog_size +- ep_dcp_eventing_total_bytes +- ep_dcp_fts_backoff +- ep_dcp_fts_count +- ep_dcp_fts_items_remaining +- ep_dcp_fts_items_sent +- ep_dcp_fts_producer_count +- ep_dcp_fts_total_backlog_size +- ep_dcp_fts_total_bytes +- ep_dcp_other_backoff +- ep_dcp_other_count +- ep_dcp_other_items_remaining +- ep_dcp_other_items_sent +- ep_dcp_other_producer_count +- ep_dcp_other_total_backlog_size +- ep_dcp_other_total_bytes +- ep_dcp_replica_backoff +- ep_dcp_replica_count +- ep_dcp_replica_items_remaining +- ep_dcp_replica_items_sent +- ep_dcp_replica_producer_count +- ep_dcp_replica_total_backlog_size +- ep_dcp_replica_total_bytes +- ep_dcp_views_backoff +- ep_dcp_views_count +- ep_dcp_views_items_remaining +- ep_dcp_views_items_sent +- ep_dcp_views_producer_count +- ep_dcp_views_total_backlog_size +- ep_dcp_views_total_bytes +- ep_dcp_xdcr_backoff +- ep_dcp_xdcr_count +- ep_dcp_xdcr_items_remaining +- ep_dcp_xdcr_items_sent +- ep_dcp_xdcr_producer_count +- ep_dcp_xdcr_total_backlog_size +- ep_dcp_xdcr_total_bytes +- ep_diskqueue_drain +- ep_diskqueue_fill +- ep_diskqueue_items +- ep_flusher_todo +- ep_item_commit_failed +- ep_kv_size +- ep_max_size +- ep_mem_high_wat +- ep_mem_low_wat +- ep_meta_data_memory +- ep_num_non_resident +- ep_num_ops_del_meta +- ep_num_ops_del_ret_meta +- ep_num_ops_get_meta +- ep_num_ops_set_meta +- ep_num_ops_set_ret_meta +- ep_num_value_ejects +- ep_oom_errors +- ep_ops_create +- ep_ops_update +- ep_overhead +- ep_queue_size +- ep_replica_ahead_exceptions +- ep_replica_hlc_drift +- ep_replica_hlc_drift_count +- ep_tmp_oom_errors +- ep_vb_total +- evictions +- get_hits +- get_misses +- incr_hits +- incr_misses +- mem_used +- misses +- ops +- timestamp +- vb_active_eject +- vb_active_itm_memory +- vb_active_meta_data_memory +- vb_active_num +- vb_active_num_non_resident +- vb_active_ops_create +- vb_active_ops_update +- vb_active_queue_age +- vb_active_queue_drain +- vb_active_queue_fill +- vb_active_queue_size +- vb_active_sync_write_aborted_count +- vb_active_sync_write_accepted_count +- vb_active_sync_write_committed_count +- vb_pending_curr_items +- vb_pending_eject +- vb_pending_itm_memory +- vb_pending_meta_data_memory +- vb_pending_num +- vb_pending_num_non_resident +- vb_pending_ops_create +- vb_pending_ops_update +- vb_pending_queue_age +- vb_pending_queue_drain +- vb_pending_queue_fill +- vb_pending_queue_size +- vb_replica_curr_items +- vb_replica_eject +- vb_replica_itm_memory +- vb_replica_meta_data_memory +- vb_replica_num +- vb_replica_num_non_resident +- vb_replica_ops_create +- vb_replica_ops_update +- vb_replica_queue_age +- vb_replica_queue_drain +- vb_replica_queue_fill +- vb_replica_queue_size +- vb_total_queue_age +- xdc_ops +- allocstall +- cpu_cores_available +- cpu_irq_rate +- cpu_stolen_rate +- cpu_sys_rate +- cpu_user_rate +- cpu_utilization_rate +- hibernated_requests +- hibernated_waked +- mem_actual_free +- mem_actual_used +- mem_free +- mem_limit +- mem_total +- mem_used_sys +- odp_report_failed +- rest_requests +- swap_total +- swap_used + ## Example output diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index de7f0bec0c9fa..f67e75096cde3 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -1,16 +1,29 @@ package couchbase import ( + "encoding/json" + "net/http" "regexp" "sync" + "time" + + couchbaseClient "github.com/couchbase/go-couchbase" - couchbase 
"github.com/couchbase/go-couchbase" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type Couchbase struct { Servers []string + + BucketStatsIncluded []string `toml:"bucket_stats_included"` + + bucketInclude filter.Filter + client *http.Client + + tls.ClientConfig } var sampleConfig = ` @@ -24,33 +37,42 @@ var sampleConfig = ` ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] + + ## Filter bucket fields to include only here. + # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ` var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) -func (r *Couchbase) SampleConfig() string { +func (cb *Couchbase) SampleConfig() string { return sampleConfig } -func (r *Couchbase) Description() string { - return "Read metrics from one or many couchbase clusters" +func (cb *Couchbase) Description() string { + return "Read per-node and per-bucket metrics from Couchbase" } // Reads stats from all configured clusters. Accumulates stats. // Returns one of the errors encountered while gathering stats (if any). -func (r *Couchbase) Gather(acc telegraf.Accumulator) error { - if len(r.Servers) == 0 { - r.gatherServer("http://localhost:8091/", acc, nil) - return nil +func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { + if len(cb.Servers) == 0 { + return cb.gatherServer(acc, "http://localhost:8091/") } var wg sync.WaitGroup - - for _, serv := range r.Servers { + for _, serv := range cb.Servers { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(r.gatherServer(serv, acc, nil)) + acc.AddError(cb.gatherServer(acc, serv)) }(serv) } @@ -59,26 +81,26 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error { return nil } -func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { - if pool == nil { - client, err := couchbase.Connect(addr) - if err != nil { - return err - } +func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error { + escapedAddr := regexpURI.ReplaceAllString(addr, "${1}") - // `default` is the only possible pool name. It's a - // placeholder for a possible future Couchbase feature. See - // http://stackoverflow.com/a/16990911/17498. - p, err := client.GetPool("default") - if err != nil { - return err - } - pool = &p + client, err := couchbaseClient.Connect(addr) + if err != nil { + return err } + // `default` is the only possible pool name. It's a + // placeholder for a possible future Couchbase feature. See + // http://stackoverflow.com/a/16990911/17498. 
+ pool, err := client.GetPool("default") + if err != nil { + return err + } + + defer pool.Close() + for i := 0; i < len(pool.Nodes); i++ { node := pool.Nodes[i] - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname} + tags := map[string]string{"cluster": escapedAddr, "hostname": node.Hostname} fields := make(map[string]interface{}) fields["memory_free"] = node.MemoryFree fields["memory_total"] = node.MemoryTotal @@ -86,23 +108,321 @@ } for bucketName := range pool.BucketMap { - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} + tags := map[string]string{"cluster": escapedAddr, "bucket": bucketName} bs := pool.BucketMap[bucketName].BasicStats fields := make(map[string]interface{}) - fields["quota_percent_used"] = bs["quotaPercentUsed"] - fields["ops_per_sec"] = bs["opsPerSec"] - fields["disk_fetches"] = bs["diskFetches"] - fields["item_count"] = bs["itemCount"] - fields["disk_used"] = bs["diskUsed"] - fields["data_used"] = bs["dataUsed"] - fields["mem_used"] = bs["memUsed"] + cb.addBucketField(fields, "quota_percent_used", bs["quotaPercentUsed"]) + cb.addBucketField(fields, "ops_per_sec", bs["opsPerSec"]) + cb.addBucketField(fields, "disk_fetches", bs["diskFetches"]) + cb.addBucketField(fields, "item_count", bs["itemCount"]) + cb.addBucketField(fields, "disk_used", bs["diskUsed"]) + cb.addBucketField(fields, "data_used", bs["dataUsed"]) + cb.addBucketField(fields, "mem_used", bs["memUsed"]) + + err := cb.gatherDetailedBucketStats(addr, bucketName, fields) + if err != nil { + return err + } + acc.AddFields("couchbase_bucket", fields, tags) } + + return nil +} + +func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, fields map[string]interface{}) error { + extendedBucketStats := &BucketStats{} + err := cb.queryDetailedBucketStats(server, bucket, extendedBucketStats) + if err != nil { + return err + } + + // Use the length of any one series of samples; the endpoint returns them all at the same length. 
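+	// couch_total_disk_size is assumed to always be present and is used as the
+	// reference series; addBucketFieldChecked skips any series the server omitted
+	// (nil slice), so a missing series simply yields no field.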
+ lastEntry := len(extendedBucketStats.Op.Samples.CouchTotalDiskSize) - 1 + cb.addBucketFieldChecked(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation, lastEntry) + cb.addBucketFieldChecked(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio, lastEntry) + cb.addBucketFieldChecked(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate, lastEntry) + cb.addBucketFieldChecked(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio, lastEntry) + cb.addBucketFieldChecked(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime, lastEntry) + cb.addBucketFieldChecked(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime, lastEntry) + cb.addBucketFieldChecked(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime, lastEntry) + cb.addBucketFieldChecked(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift, lastEntry) + cb.addBucketFieldChecked(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount, lastEntry) + cb.addBucketFieldChecked(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal, lastEntry) + 
cb.addBucketFieldChecked(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead, lastEntry) + cb.addBucketFieldChecked(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten, lastEntry) + cb.addBucketFieldChecked(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval, lastEntry) + cb.addBucketFieldChecked(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits, lastEntry) + cb.addBucketFieldChecked(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses, lastEntry) + cb.addBucketFieldChecked(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet, lastEntry) + cb.addBucketFieldChecked(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup, lastEntry) + cb.addBucketFieldChecked(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps, lastEntry) + cb.addBucketFieldChecked(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections, lastEntry) + cb.addBucketFieldChecked(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems, lastEntry) + cb.addBucketFieldChecked(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot, lastEntry) + cb.addBucketFieldChecked(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits, lastEntry) + cb.addBucketFieldChecked(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses, lastEntry) + cb.addBucketFieldChecked(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits, lastEntry) + cb.addBucketFieldChecked(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses, lastEntry) + cb.addBucketFieldChecked(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount, lastEntry) + cb.addBucketFieldChecked(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal, lastEntry) + cb.addBucketFieldChecked(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount, lastEntry) + cb.addBucketFieldChecked(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal, lastEntry) + cb.addBucketFieldChecked(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue, lastEntry) + cb.addBucketFieldChecked(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions, lastEntry) + cb.addBucketFieldChecked(fields, "ep_active_hlc_drift", 
extendedBucketStats.Op.Samples.EpActiveHlcDrift, lastEntry) + cb.addBucketFieldChecked(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched, lastEntry) + cb.addBucketFieldChecked(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded, lastEntry) + cb.addBucketFieldChecked(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed, lastEntry) + cb.addBucketFieldChecked(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_count", 
extendedBucketStats.Op.Samples.EpDcpFtsCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff, lastEntry) + 
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems, lastEntry) + cb.addBucketFieldChecked(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo, lastEntry) + cb.addBucketFieldChecked(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed, lastEntry) + cb.addBucketFieldChecked(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat, lastEntry) + cb.addBucketFieldChecked(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat, lastEntry) + cb.addBucketFieldChecked(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects, lastEntry) + cb.addBucketFieldChecked(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors, lastEntry) + cb.addBucketFieldChecked(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead, lastEntry) + cb.addBucketFieldChecked(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions, lastEntry) + cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift, lastEntry) + cb.addBucketFieldChecked(fields, 
"ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors, lastEntry) + cb.addBucketFieldChecked(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal, lastEntry) + cb.addBucketFieldChecked(fields, "evictions", extendedBucketStats.Op.Samples.Evictions, lastEntry) + cb.addBucketFieldChecked(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits, lastEntry) + cb.addBucketFieldChecked(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses, lastEntry) + cb.addBucketFieldChecked(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits, lastEntry) + cb.addBucketFieldChecked(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses, lastEntry) + cb.addBucketFieldChecked(fields, "misses", extendedBucketStats.Op.Samples.Misses, lastEntry) + cb.addBucketFieldChecked(fields, "ops", extendedBucketStats.Op.Samples.Ops, lastEntry) + cb.addBucketFieldChecked(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_num_non_resident", 
extendedBucketStats.Op.Samples.VbPendingNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps, lastEntry) + cb.addBucketFieldChecked(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate, lastEntry) + cb.addBucketFieldChecked(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests, lastEntry) + cb.addBucketFieldChecked(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked, lastEntry) + cb.addBucketFieldChecked(fields, "mem_actual_free", 
extendedBucketStats.Op.Samples.MemActualFree, lastEntry) + cb.addBucketFieldChecked(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed, lastEntry) + cb.addBucketFieldChecked(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree, lastEntry) + cb.addBucketFieldChecked(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit, lastEntry) + cb.addBucketFieldChecked(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal, lastEntry) + cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys, lastEntry) + cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed, lastEntry) + cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests, lastEntry) + cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal, lastEntry) + cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed, lastEntry) + + return nil +} + +func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey string, value interface{}) { + if !cb.bucketInclude.Match(fieldKey) { + return + } + + fields[fieldKey] = value +} + +func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64, index int) { + if values == nil { + return + } + + cb.addBucketField(fields, fieldKey, values[index]) +} + +func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats *BucketStats) error { + // Set up an HTTP request to get the complete set of bucket stats. + req, err := http.NewRequest("GET", server+"/pools/default/buckets/"+bucket+"/stats?", nil) + if err != nil { + return err + } + + r, err := cb.client.Do(req) + if err != nil { + return err + } + + defer r.Body.Close() + + return json.NewDecoder(r.Body).Decode(bucketStats) +} + +func (cb *Couchbase) Init() error { + f, err := filter.NewIncludeExcludeFilter(cb.BucketStatsIncluded, []string{}) + if err != nil { + return err + } + + cb.bucketInclude = f + + tlsConfig, err := cb.TLSConfig() + if err != nil { + return err + } + + cb.client = &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost, + TLSClientConfig: tlsConfig, + }, + } + + couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) + couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert) + couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey) + couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA) + return nil } func init() { inputs.Add("couchbase", func() telegraf.Input { - return &Couchbase{} + return &Couchbase{ + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + } }) } diff --git a/plugins/inputs/couchbase/couchbase_data.go b/plugins/inputs/couchbase/couchbase_data.go new file mode 100644 index 0000000000000..2b1227f5c8cdc --- /dev/null +++ b/plugins/inputs/couchbase/couchbase_data.go @@ -0,0 +1,228 @@ +package couchbase + +type BucketStats struct { + Op struct { + Samples struct { + CouchTotalDiskSize []float64 `json:"couch_total_disk_size"` + CouchDocsFragmentation []float64 `json:"couch_docs_fragmentation"` + CouchViewsFragmentation []float64 `json:"couch_views_fragmentation"` + HitRatio []float64 `json:"hit_ratio"` + EpCacheMissRate []float64 `json:"ep_cache_miss_rate"` + EpResidentItemsRate []float64 `json:"ep_resident_items_rate"` + VbAvgActiveQueueAge []float64 
`json:"vb_avg_active_queue_age"` + VbAvgReplicaQueueAge []float64 `json:"vb_avg_replica_queue_age"` + VbAvgPendingQueueAge []float64 `json:"vb_avg_pending_queue_age"` + VbAvgTotalQueueAge []float64 `json:"vb_avg_total_queue_age"` + VbActiveResidentItemsRatio []float64 `json:"vb_active_resident_items_ratio"` + VbReplicaResidentItemsRatio []float64 `json:"vb_replica_resident_items_ratio"` + VbPendingResidentItemsRatio []float64 `json:"vb_pending_resident_items_ratio"` + AvgDiskUpdateTime []float64 `json:"avg_disk_update_time"` + AvgDiskCommitTime []float64 `json:"avg_disk_commit_time"` + AvgBgWaitTime []float64 `json:"avg_bg_wait_time"` + AvgActiveTimestampDrift []float64 `json:"avg_active_timestamp_drift"` + AvgReplicaTimestampDrift []float64 `json:"avg_replica_timestamp_drift"` + EpDcpViewsIndexesCount []float64 `json:"ep_dcp_views+indexes_count"` + EpDcpViewsIndexesItemsRemaining []float64 `json:"ep_dcp_views+indexes_items_remaining"` + EpDcpViewsIndexesProducerCount []float64 `json:"ep_dcp_views+indexes_producer_count"` + EpDcpViewsIndexesTotalBacklogSize []float64 `json:"ep_dcp_views+indexes_total_backlog_size"` + EpDcpViewsIndexesItemsSent []float64 `json:"ep_dcp_views+indexes_items_sent"` + EpDcpViewsIndexesTotalBytes []float64 `json:"ep_dcp_views+indexes_total_bytes"` + EpDcpViewsIndexesBackoff []float64 `json:"ep_dcp_views+indexes_backoff"` + BgWaitCount []float64 `json:"bg_wait_count"` + BgWaitTotal []float64 `json:"bg_wait_total"` + BytesRead []float64 `json:"bytes_read"` + BytesWritten []float64 `json:"bytes_written"` + CasBadval []float64 `json:"cas_badval"` + CasHits []float64 `json:"cas_hits"` + CasMisses []float64 `json:"cas_misses"` + CmdGet []float64 `json:"cmd_get"` + CmdLookup []float64 `json:"cmd_lookup"` + CmdSet []float64 `json:"cmd_set"` + CouchDocsActualDiskSize []float64 `json:"couch_docs_actual_disk_size"` + CouchDocsDataSize []float64 `json:"couch_docs_data_size"` + CouchDocsDiskSize []float64 `json:"couch_docs_disk_size"` + CouchSpatialDataSize []float64 `json:"couch_spatial_data_size"` + CouchSpatialDiskSize []float64 `json:"couch_spatial_disk_size"` + CouchSpatialOps []float64 `json:"couch_spatial_ops"` + CouchViewsActualDiskSize []float64 `json:"couch_views_actual_disk_size"` + CouchViewsDataSize []float64 `json:"couch_views_data_size"` + CouchViewsDiskSize []float64 `json:"couch_views_disk_size"` + CouchViewsOps []float64 `json:"couch_views_ops"` + CurrConnections []float64 `json:"curr_connections"` + CurrItems []float64 `json:"curr_items"` + CurrItemsTot []float64 `json:"curr_items_tot"` + DecrHits []float64 `json:"decr_hits"` + DecrMisses []float64 `json:"decr_misses"` + DeleteHits []float64 `json:"delete_hits"` + DeleteMisses []float64 `json:"delete_misses"` + DiskCommitCount []float64 `json:"disk_commit_count"` + DiskCommitTotal []float64 `json:"disk_commit_total"` + DiskUpdateCount []float64 `json:"disk_update_count"` + DiskUpdateTotal []float64 `json:"disk_update_total"` + DiskWriteQueue []float64 `json:"disk_write_queue"` + EpActiveAheadExceptions []float64 `json:"ep_active_ahead_exceptions"` + EpActiveHlcDrift []float64 `json:"ep_active_hlc_drift"` + EpActiveHlcDriftCount []float64 `json:"ep_active_hlc_drift_count"` + EpBgFetched []float64 `json:"ep_bg_fetched"` + EpClockCasDriftThresholdExceeded []float64 `json:"ep_clock_cas_drift_threshold_exceeded"` + EpDataReadFailed []float64 `json:"ep_data_read_failed"` + EpDataWriteFailed []float64 `json:"ep_data_write_failed"` + EpDcp2IBackoff []float64 `json:"ep_dcp_2i_backoff"` + EpDcp2ICount []float64 
`json:"ep_dcp_2i_count"` + EpDcp2IItemsRemaining []float64 `json:"ep_dcp_2i_items_remaining"` + EpDcp2IItemsSent []float64 `json:"ep_dcp_2i_items_sent"` + EpDcp2IProducerCount []float64 `json:"ep_dcp_2i_producer_count"` + EpDcp2ITotalBacklogSize []float64 `json:"ep_dcp_2i_total_backlog_size"` + EpDcp2ITotalBytes []float64 `json:"ep_dcp_2i_total_bytes"` + EpDcpCbasBackoff []float64 `json:"ep_dcp_cbas_backoff"` + EpDcpCbasCount []float64 `json:"ep_dcp_cbas_count"` + EpDcpCbasItemsRemaining []float64 `json:"ep_dcp_cbas_items_remaining"` + EpDcpCbasItemsSent []float64 `json:"ep_dcp_cbas_items_sent"` + EpDcpCbasProducerCount []float64 `json:"ep_dcp_cbas_producer_count"` + EpDcpCbasTotalBacklogSize []float64 `json:"ep_dcp_cbas_total_backlog_size"` + EpDcpCbasTotalBytes []float64 `json:"ep_dcp_cbas_total_bytes"` + EpDcpEventingBackoff []float64 `json:"ep_dcp_eventing_backoff"` + EpDcpEventingCount []float64 `json:"ep_dcp_eventing_count"` + EpDcpEventingItemsRemaining []float64 `json:"ep_dcp_eventing_items_remaining"` + EpDcpEventingItemsSent []float64 `json:"ep_dcp_eventing_items_sent"` + EpDcpEventingProducerCount []float64 `json:"ep_dcp_eventing_producer_count"` + EpDcpEventingTotalBacklogSize []float64 `json:"ep_dcp_eventing_total_backlog_size"` + EpDcpEventingTotalBytes []float64 `json:"ep_dcp_eventing_total_bytes"` + EpDcpFtsBackoff []float64 `json:"ep_dcp_fts_backoff"` + EpDcpFtsCount []float64 `json:"ep_dcp_fts_count"` + EpDcpFtsItemsRemaining []float64 `json:"ep_dcp_fts_items_remaining"` + EpDcpFtsItemsSent []float64 `json:"ep_dcp_fts_items_sent"` + EpDcpFtsProducerCount []float64 `json:"ep_dcp_fts_producer_count"` + EpDcpFtsTotalBacklogSize []float64 `json:"ep_dcp_fts_total_backlog_size"` + EpDcpFtsTotalBytes []float64 `json:"ep_dcp_fts_total_bytes"` + EpDcpOtherBackoff []float64 `json:"ep_dcp_other_backoff"` + EpDcpOtherCount []float64 `json:"ep_dcp_other_count"` + EpDcpOtherItemsRemaining []float64 `json:"ep_dcp_other_items_remaining"` + EpDcpOtherItemsSent []float64 `json:"ep_dcp_other_items_sent"` + EpDcpOtherProducerCount []float64 `json:"ep_dcp_other_producer_count"` + EpDcpOtherTotalBacklogSize []float64 `json:"ep_dcp_other_total_backlog_size"` + EpDcpOtherTotalBytes []float64 `json:"ep_dcp_other_total_bytes"` + EpDcpReplicaBackoff []float64 `json:"ep_dcp_replica_backoff"` + EpDcpReplicaCount []float64 `json:"ep_dcp_replica_count"` + EpDcpReplicaItemsRemaining []float64 `json:"ep_dcp_replica_items_remaining"` + EpDcpReplicaItemsSent []float64 `json:"ep_dcp_replica_items_sent"` + EpDcpReplicaProducerCount []float64 `json:"ep_dcp_replica_producer_count"` + EpDcpReplicaTotalBacklogSize []float64 `json:"ep_dcp_replica_total_backlog_size"` + EpDcpReplicaTotalBytes []float64 `json:"ep_dcp_replica_total_bytes"` + EpDcpViewsBackoff []float64 `json:"ep_dcp_views_backoff"` + EpDcpViewsCount []float64 `json:"ep_dcp_views_count"` + EpDcpViewsItemsRemaining []float64 `json:"ep_dcp_views_items_remaining"` + EpDcpViewsItemsSent []float64 `json:"ep_dcp_views_items_sent"` + EpDcpViewsProducerCount []float64 `json:"ep_dcp_views_producer_count"` + EpDcpViewsTotalBacklogSize []float64 `json:"ep_dcp_views_total_backlog_size"` + EpDcpViewsTotalBytes []float64 `json:"ep_dcp_views_total_bytes"` + EpDcpXdcrBackoff []float64 `json:"ep_dcp_xdcr_backoff"` + EpDcpXdcrCount []float64 `json:"ep_dcp_xdcr_count"` + EpDcpXdcrItemsRemaining []float64 `json:"ep_dcp_xdcr_items_remaining"` + EpDcpXdcrItemsSent []float64 `json:"ep_dcp_xdcr_items_sent"` + EpDcpXdcrProducerCount []float64 
`json:"ep_dcp_xdcr_producer_count"` + EpDcpXdcrTotalBacklogSize []float64 `json:"ep_dcp_xdcr_total_backlog_size"` + EpDcpXdcrTotalBytes []float64 `json:"ep_dcp_xdcr_total_bytes"` + EpDiskqueueDrain []float64 `json:"ep_diskqueue_drain"` + EpDiskqueueFill []float64 `json:"ep_diskqueue_fill"` + EpDiskqueueItems []float64 `json:"ep_diskqueue_items"` + EpFlusherTodo []float64 `json:"ep_flusher_todo"` + EpItemCommitFailed []float64 `json:"ep_item_commit_failed"` + EpKvSize []float64 `json:"ep_kv_size"` + EpMaxSize []float64 `json:"ep_max_size"` + EpMemHighWat []float64 `json:"ep_mem_high_wat"` + EpMemLowWat []float64 `json:"ep_mem_low_wat"` + EpMetaDataMemory []float64 `json:"ep_meta_data_memory"` + EpNumNonResident []float64 `json:"ep_num_non_resident"` + EpNumOpsDelMeta []float64 `json:"ep_num_ops_del_meta"` + EpNumOpsDelRetMeta []float64 `json:"ep_num_ops_del_ret_meta"` + EpNumOpsGetMeta []float64 `json:"ep_num_ops_get_meta"` + EpNumOpsSetMeta []float64 `json:"ep_num_ops_set_meta"` + EpNumOpsSetRetMeta []float64 `json:"ep_num_ops_set_ret_meta"` + EpNumValueEjects []float64 `json:"ep_num_value_ejects"` + EpOomErrors []float64 `json:"ep_oom_errors"` + EpOpsCreate []float64 `json:"ep_ops_create"` + EpOpsUpdate []float64 `json:"ep_ops_update"` + EpOverhead []float64 `json:"ep_overhead"` + EpQueueSize []float64 `json:"ep_queue_size"` + EpReplicaAheadExceptions []float64 `json:"ep_replica_ahead_exceptions"` + EpReplicaHlcDrift []float64 `json:"ep_replica_hlc_drift"` + EpReplicaHlcDriftCount []float64 `json:"ep_replica_hlc_drift_count"` + EpTmpOomErrors []float64 `json:"ep_tmp_oom_errors"` + EpVbTotal []float64 `json:"ep_vb_total"` + Evictions []float64 `json:"evictions"` + GetHits []float64 `json:"get_hits"` + GetMisses []float64 `json:"get_misses"` + IncrHits []float64 `json:"incr_hits"` + IncrMisses []float64 `json:"incr_misses"` + MemUsed []float64 `json:"mem_used"` + Misses []float64 `json:"misses"` + Ops []float64 `json:"ops"` + Timestamp []float64 `json:"timestamp"` + VbActiveEject []float64 `json:"vb_active_eject"` + VbActiveItmMemory []float64 `json:"vb_active_itm_memory"` + VbActiveMetaDataMemory []float64 `json:"vb_active_meta_data_memory"` + VbActiveNum []float64 `json:"vb_active_num"` + VbActiveNumNonResident []float64 `json:"vb_active_num_non_resident"` + VbActiveOpsCreate []float64 `json:"vb_active_ops_create"` + VbActiveOpsUpdate []float64 `json:"vb_active_ops_update"` + VbActiveQueueAge []float64 `json:"vb_active_queue_age"` + VbActiveQueueDrain []float64 `json:"vb_active_queue_drain"` + VbActiveQueueFill []float64 `json:"vb_active_queue_fill"` + VbActiveQueueSize []float64 `json:"vb_active_queue_size"` + VbActiveSyncWriteAbortedCount []float64 `json:"vb_active_sync_write_aborted_count"` + VbActiveSyncWriteAcceptedCount []float64 `json:"vb_active_sync_write_accepted_count"` + VbActiveSyncWriteCommittedCount []float64 `json:"vb_active_sync_write_committed_count"` + VbPendingCurrItems []float64 `json:"vb_pending_curr_items"` + VbPendingEject []float64 `json:"vb_pending_eject"` + VbPendingItmMemory []float64 `json:"vb_pending_itm_memory"` + VbPendingMetaDataMemory []float64 `json:"vb_pending_meta_data_memory"` + VbPendingNum []float64 `json:"vb_pending_num"` + VbPendingNumNonResident []float64 `json:"vb_pending_num_non_resident"` + VbPendingOpsCreate []float64 `json:"vb_pending_ops_create"` + VbPendingOpsUpdate []float64 `json:"vb_pending_ops_update"` + VbPendingQueueAge []float64 `json:"vb_pending_queue_age"` + VbPendingQueueDrain []float64 `json:"vb_pending_queue_drain"` + 
VbPendingQueueFill []float64 `json:"vb_pending_queue_fill"` + VbPendingQueueSize []float64 `json:"vb_pending_queue_size"` + VbReplicaCurrItems []float64 `json:"vb_replica_curr_items"` + VbReplicaEject []float64 `json:"vb_replica_eject"` + VbReplicaItmMemory []float64 `json:"vb_replica_itm_memory"` + VbReplicaMetaDataMemory []float64 `json:"vb_replica_meta_data_memory"` + VbReplicaNum []float64 `json:"vb_replica_num"` + VbReplicaNumNonResident []float64 `json:"vb_replica_num_non_resident"` + VbReplicaOpsCreate []float64 `json:"vb_replica_ops_create"` + VbReplicaOpsUpdate []float64 `json:"vb_replica_ops_update"` + VbReplicaQueueAge []float64 `json:"vb_replica_queue_age"` + VbReplicaQueueDrain []float64 `json:"vb_replica_queue_drain"` + VbReplicaQueueFill []float64 `json:"vb_replica_queue_fill"` + VbReplicaQueueSize []float64 `json:"vb_replica_queue_size"` + VbTotalQueueAge []float64 `json:"vb_total_queue_age"` + XdcOps []float64 `json:"xdc_ops"` + Allocstall []float64 `json:"allocstall"` + CPUCoresAvailable []float64 `json:"cpu_cores_available"` + CPUIrqRate []float64 `json:"cpu_irq_rate"` + CPUStolenRate []float64 `json:"cpu_stolen_rate"` + CPUSysRate []float64 `json:"cpu_sys_rate"` + CPUUserRate []float64 `json:"cpu_user_rate"` + CPUUtilizationRate []float64 `json:"cpu_utilization_rate"` + HibernatedRequests []float64 `json:"hibernated_requests"` + HibernatedWaked []float64 `json:"hibernated_waked"` + MemActualFree []float64 `json:"mem_actual_free"` + MemActualUsed []float64 `json:"mem_actual_used"` + MemFree []float64 `json:"mem_free"` + MemLimit []float64 `json:"mem_limit"` + MemTotal []float64 `json:"mem_total"` + MemUsedSys []float64 `json:"mem_used_sys"` + OdpReportFailed []float64 `json:"odp_report_failed"` + RestRequests []float64 `json:"rest_requests"` + SwapTotal []float64 `json:"swap_total"` + SwapUsed []float64 `json:"swap_used"` + } `json:"samples"` + Samplescount int `json:"samplesCount"` + Ispersistent bool `json:"isPersistent"` + Lasttstamp int64 `json:"lastTStamp"` + Interval int `json:"interval"` + } `json:"op"` + HotKeys []interface{} `json:"hot_keys"` +} diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index df7f1b4c14cf7..e6abc3ea74c01 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,31 +2,49 @@ package couchbase import ( "encoding/json" + "github.com/influxdata/telegraf/plugins/common/tls" + "net/http" + "net/http/httptest" "testing" "github.com/influxdata/telegraf/testutil" - - "github.com/couchbase/go-couchbase" + "github.com/stretchr/testify/require" ) func TestGatherServer(t *testing.T) { - var pool couchbase.Pool - if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { - t.Fatal("parse poolsDefaultResponse", err) - } + bucket := "blastro-df" + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/pools" { + _, _ = w.Write([]byte(poolsResponse)) + } else if r.URL.Path == "/pools/default" { + _, _ = w.Write([]byte(poolsDefaultResponse)) + } else if r.URL.Path == "/pools/default/buckets" { + _, _ = w.Write([]byte(bucketsResponse)) + } else if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + _, _ = w.Write([]byte(bucketStatsResponse)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) - if err := json.Unmarshal([]byte(bucketResponse), &pool.BucketMap); err != nil { - t.Fatal("parse bucketResponse", err) + cb := Couchbase{ + BucketStatsIncluded: 
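A quick sketch of how the BucketStats struct defined above is consumed: decode the REST payload, then read each series at its last index, which is what gatherDetailedBucketStats does via lastEntry. The struct here is a trimmed-down, illustrative mirror of BucketStats, not the plugin's own type:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type bucketStats struct {
		Op struct {
			Samples struct {
				MemUsed []float64 `json:"mem_used"`
			} `json:"samples"`
		} `json:"op"`
	}

	func main() {
		payload := `{"op":{"samples":{"mem_used":[100,200,300]}}}`

		var stats bucketStats
		if err := json.Unmarshal([]byte(payload), &stats); err != nil {
			panic(err)
		}

		// The newest sample is the last element of every series.
		lastEntry := len(stats.Op.Samples.MemUsed) - 1
		fmt.Println(stats.Op.Samples.MemUsed[lastEntry]) // 300
	}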
[]string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, } - var cb Couchbase + err := cb.Init() + require.NoError(t, err) + var acc testutil.Accumulator - cb.gatherServer("mycluster", &acc, &pool) + err = cb.gatherServer(&acc, fakeServer.URL) + require.NoError(t, err) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, - map[string]string{"cluster": "mycluster", "hostname": "172.16.10.187:8091"}) + map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.187:8091"}) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0}, - map[string]string{"cluster": "mycluster", "hostname": "172.16.10.65:8091"}) + map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.65:8091"}) acc.AssertContainsTaggedFields(t, "couchbase_bucket", map[string]interface{}{ "quota_percent_used": 68.85424936294555, @@ -37,11 +55,10 @@ func TestGatherServer(t *testing.T) { "data_used": 212179309111.0, "mem_used": 202156957464.0, }, - map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) + map[string]string{"cluster": fakeServer.URL, "bucket": "blastro-df"}) } func TestSanitizeURI(t *testing.T) { - var sanitizeTest = []struct { input string expected string @@ -64,8 +81,67 @@ func TestSanitizeURI(t *testing.T) { } } +func TestGatherDetailedBucketMetrics(t *testing.T) { + bucket := "Ducks" + tests := []struct { + name string + response string + }{ + { + name: "with all fields", + response: bucketStatsResponse, + }, + { + name: "missing fields", + response: bucketStatsResponseWithMissing, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + _, _ = w.Write([]byte(test.response)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + var err error + var cb Couchbase + cb.BucketStatsIncluded = []string{"couch_total_disk_size"} + cb.ClientConfig = tls.ClientConfig{ + InsecureSkipVerify: true, + } + err = cb.Init() + require.NoError(t, err) + var acc testutil.Accumulator + bucketStats := &BucketStats{} + if err := json.Unmarshal([]byte(test.response), bucketStats); err != nil { + t.Fatal("parse bucketResponse", err) + } + + fields := make(map[string]interface{}) + err = cb.gatherDetailedBucketStats(fakeServer.URL, bucket, fields) + require.NoError(t, err) + + acc.AddFields("couchbase_bucket", fields, nil) + + // Ensure we gathered only one metric (the one that we configured). + require.Equal(t, len(acc.Metrics), 1) + require.Equal(t, len(acc.Metrics[0].Fields), 1) + }) + } +} + +// From `/pools` +const poolsResponse string = `{"pools":[{"name":"default","uri":"/pools/default"}]}` + // From `/pools/default` on a real cluster -const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. 
Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.
878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_v
iews_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` +const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups","name":"default","alerts":["Metadata overhead warning. 
Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"
uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":
0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode"},"rebalance":{"uri":"/controller/rebalance"},"failOver":{"uri":"/controller/failOver"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover"},"reAddNode":{"uri":"/controller/reAddNode"},"reFailOver":{"uri":"/controller/reFailOver"},"ejectNode":{"uri":"/controller/ejectNode"},"setRecoveryType":{"uri":"/controller/setRecoveryType"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection","cancelURI":"/controller/cancelLogsCollection"},"replication":{"createURI":"/controller/createReplication","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks"},"visualSettingsUri":"/internalSettings/visual","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` + +// From `/pools/default/buckets` on a real cluster +const bucketsResponse string = 
`[{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.
15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":
0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1
,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3]
,[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}]` -// From `/pools/default/buckets/blastro-df` on a real cluster -const bucketResponse string = `{"blastro-df": {"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9
dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal
":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4]
,[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6
,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}` +const bucketStatsResponse string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,10
0,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[118.1818181818182,142.2805247225025,180.8080808080808,197.7800201816347,141.9939577039275,118.5410334346505,142.4242424242424,148.4848484848485,197.3816717019134,202.4291497975709,118.0625630676085,142.4242424242424,179.6165489404642,197.979797979798,142.4242424242424,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,201.816347124117,118.1818181818182,142.4242424242424,148.4848484848485,197.7800201816347,142.4242424242424,118.1818181818182,142.2805247225025,179.7979797979798,197.1830985915493,202.6342451874367,118.1818181818182,142.2805247225025,180.4435483870968,198.3805668016194,142.2805247225025,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,202.020202020202,118.0625630676085,118.1818181818182,204.0404040
40404,197.7800201816347,142.1370967741935,118.4210526315789,118.1818181818182,172.5529767911201,197.5806451612903,202.4291497975709,118.0625630676085,118.1818181818182,172.7272727272727,197.7800201816347,142.4242424242424,118.0625630676085,118.1818181818182,204.040404040404,197.979797979798,201.816347124117],"bytes_written":[36420.20202020202,37762.86579212916,37225.25252525252,50460.14127144299,37686.80765357502,36530.90172239109,37801.0101010101,37111.11111111111,50358.50956696878,60511.13360323886,36383.45105953582,37801.0101010101,37393.54187689203,50511.11111111111,37801.0101010101,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60327.95156407669,36420.20202020202,37801.0101010101,37111.11111111111,50460.14127144299,37801.0101010101,36420.20202020202,37762.86579212916,37431.31313131313,50307.84708249497,60572.44174265451,36420.20202020202,37762.86579212916,37150.20161290323,50613.36032388664,37762.86579212916,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60388.88888888889,36383.45105953582,36420.20202020202,38812.12121212122,50460.14127144299,37724.79838709677,36493.92712550607,36420.20202020202,38453.07769929364,50409.27419354839,60511.13360323886,36383.45105953582,36420.20202020202,38491.91919191919,50460.14127144299,37801.0101010101,36383.45105953582,36420.20202020202,38812.12121212122,50511.11111111111,60327.95156407669],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs_actual_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_data_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_docs_disk_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_sp
atial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"curr_connections":[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14],"curr_items":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"curr_items_tot":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340],"ep_max_size":[8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032],"ep_mem_high_wat":[7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,702514462
7,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627],"ep_mem_low_wat":[6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024],"ep_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_
drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"evictions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1615918120012,1615918121003,1615918121993,1615918122984,1615918123977,1615918124964,1615918125954,1615918126944,1615918127937,1615918128925,1615918129916,1615918130906,1615918131897,1615918132887,1615918133877,1615918134867,1615918135858,1615918136848,1615918137838,1615918138829,1615918139819,1615918140809,1615918141799,1615918142790,1615918143780,1615918144770,1615918145761,1615918146751,1615918147745,1615918148732,1615918149722,1615918150713,1615918151705,1615918152693,1615918153684,1615918154674,1615918155665,1615918156655,1615918157645,1615918158635,1615918159626,1615918160616,1615918161606,1615918162597,1615918163589,1615918164577,1615918165567,1615918166558,1615918167550,1615918168538,1615918169529,1615918170519,1615918171509,1615918172500,1615918173490,1615918174481,1615918175471,1615918176461,1615918177451,1615918178442],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88],"vb_active_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"vb_active_num":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"allocstall":[18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615],"cpu_cores_available":[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12],"cpu_irq_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[4.942965779467681,5.243268776570619,6.823027718550106,4.815073272854153,4.853128991060026,5.068836045056321,
4.983108108108108,4.110738255033557,3.201347935973041,3.959561920808762,3.610411418975651,3.459915611814346,3.691275167785235,4.553119730185498,6.470588235294118,4.545454545454546,5.046257359125315,5.976430976430977,5.564924114671164,3.703703703703704,3.529411764705882,3.544303797468354,3.826787512588117,5.118961788031723,7.166947723440135,5.87248322147651,4.289318755256518,5.485232067510548,4.765886287625418,4.672897196261682,4.184100418410042,4.560810810810811,7.02928870292887,6.081081081081081,5.378151260504202,6.239460370994941,8.984047019311502,6.896551724137931,9.636517328825022,9.335576114381833,7.64063811922754,8.684654300168635,6.543624161073826,6.465155331654072,5.961376994122586,3.807106598984772,3.36417157275021,3.700588730025231,3.775167785234899,9.45945945945946,3.114478114478115,3.451178451178451,4.465037910699242,3.852596314907873,3.462837837837838,5.205709487825357,5.218855218855219,6.532663316582915,5.885057471264368,4.030226700251889],"cpu_user_rate":[15.20912547528517,9.58904109589041,10.76759061833689,8.443824145150035,8.301404853128991,10.95118898623279,9.797297297297296,6.879194630872483,6.823925863521483,6.908171861836562,6.54911838790932,6.835443037974684,7.382550335570469,10.28667790893761,16.97478991596639,11.53198653198653,9.75609756097561,11.11111111111111,12.05733558178752,7.154882154882155,6.890756302521009,6.666666666666667,7.150050352467271,10.23792357606345,12.7318718381113,9.479865771812081,7.905803195962994,8.016877637130802,9.19732441471572,9.600679694137638,7.364016736401673,8.108108108108109,15.31380753138075,13.85135135135135,10.58823529411765,12.64755480607083,18.47187237615449,13.28847771236333,19.8647506339814,21.86711522287637,23.5936188077246,22.17537942664418,12.08053691275168,16.96053736356003,32.49370277078086,8.20642978003384,10.17661900756939,7.653490328006728,10.82214765100671,14.27364864864865,6.986531986531986,7.407407407407407,10.02527379949452,11.55778894472362,8.192567567567568,12.34256926952141,14.05723905723906,28.64321608040201,13.14942528735632,7.388748950461797],"cpu_utilization_rate":[20.15209125475285,14.83230987246103,17.59061833688699,13.25889741800419,13.15453384418902,16.02002503128911,14.78040540540541,10.98993288590604,10.02527379949452,10.86773378264532,10.15952980688497,10.29535864978903,11.0738255033557,14.8397976391231,23.4453781512605,16.07744107744108,14.80235492010092,17.08754208754209,17.62225969645868,10.85858585858586,10.42016806722689,10.21097046413502,10.97683786505539,15.35688536409517,19.89881956155143,15.35234899328859,12.19512195121951,13.50210970464135,13.96321070234114,14.27357689039932,11.54811715481171,12.66891891891892,22.34309623430962,19.93243243243243,15.96638655462185,18.88701517706577,27.45591939546599,20.18502943650126,29.50126796280642,31.2026913372582,31.23425692695214,30.86003372681282,18.6241610738255,23.42569269521411,38.45507976490345,12.01353637901861,13.5407905803196,11.35407905803196,14.59731543624161,23.73310810810811,10.1010101010101,10.85858585858586,14.49031171019377,15.41038525963149,11.65540540540541,17.54827875734677,19.27609427609428,35.17587939698493,19.03448275862069,11.41897565071369],"hibernated_requests":[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7],"hibernated_waked":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_actual_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977
536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_actual_used":[10175004672,10181505024,10123186176,10124263424,10120626176,10101411840,10100801536,10100355072,10100891648,10091769856,10088787968,10088095744,10088280064,10099761152,10106314752,10105954304,10099724288,10114744320,10116685824,10107191296,10106101760,10106327040,10106327040,10104967168,10126032896,10128973824,10124148736,10123046912,10122588160,10126843904,10127106048,10127884288,10130755584,10139250688,10134233088,10128961536,10158841856,10178539520,10193973248,10193973248,10224226304,10120118272,10121252864,10129801216,10132705280,10133995520,10129596416,10111340544,10106191872,10100752384,10101616640,10103988224,10114789376,10113617920,10114142208,10116382720,10115072000,10113662976,10111049728,10108059648],"mem_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_limit":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_total":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_used_sys":[16694517760,16707862528,16608030720,16610041856,16604663808,16553811968,16553463808,16553369
600,16553861120,16539238400,16536092672,16535760896,16535707648,16553418752,16559439872,16558895104,16554569728,16580468736,16582680576,16565084160,16564649984,16565272576,16565272576,16562823168,16599863296,16602157056,16597528576,16596774912,16595107840,16593002496,16593485824,16596668416,16598691840,16607469568,16599904256,16590753792,16644947968,16684613632,16714768384,16714768384,16781234176,16573353984,16575979520,16593072128,16603037696,16605077504,16599199744,16581554176,16570187776,16560140288,16561221632,16565153792,16577990656,16577200128,16578031616,16582909952,16569671680,16565702656,16560218112,16554315776],"odp_report_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"rest_requests":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,8,2,2,2,2,2,2,2,2,3,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,2,2,2,2,2],"swap_total":[1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824],"swap_used":[122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}`
+const bucketStatsResponseWithMissing string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}`
diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go
index 1b542d042dd30..fc165f4cf676c 100644
--- a/plugins/inputs/couchdb/couchdb.go
+++ b/plugins/inputs/couchdb/couchdb.go
@@ -125,9 +125,9 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
 	if c.client == nil {
 		c.client = &http.Client{
 			Transport: &http.Transport{
-				ResponseHeaderTimeout: time.Duration(3 * time.Second),
+				ResponseHeaderTimeout: 3 * time.Second,
 			},
-			Timeout: time.Duration(4 * time.Second),
+			Timeout: 4 * time.Second,
 		}
 	}
 
@@ -140,19 +140,21 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
 		req.SetBasicAuth(c.BasicUsername, c.BasicPassword)
 	}
 
-	response, error := c.client.Do(req)
-	if error != nil {
-		return error
+	response, err := c.client.Do(req)
+	if err != nil {
+		return err
 	}
 	defer response.Body.Close()
 
 	if response.StatusCode != 200 {
-		return fmt.Errorf("Failed to get stats from couchdb: HTTP responded %d", response.StatusCode)
+		return fmt.Errorf("failed to get stats from couchdb: HTTP responded %d", response.StatusCode)
 	}
 
 	stats := Stats{}
 	decoder := json.NewDecoder(response.Body)
-	decoder.Decode(&stats)
+	if err := decoder.Decode(&stats); err != nil {
+		return fmt.Errorf("failed to decode stats from couchdb: %w", err)
+	}
 
 	fields := map[string]interface{}{}
 
@@ -287,9 +289,9 @@ func init() {
 		return &CouchDB{
 			client: &http.Client{
 				Transport: &http.Transport{
-					ResponseHeaderTimeout: time.Duration(3 * time.Second),
+					ResponseHeaderTimeout: 3 * time.Second,
 				},
-				Timeout: time.Duration(4 * time.Second),
+				Timeout: 4 * time.Second,
 			},
 		}
 	})
diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md
index bc86ae898021c..8e2ef66f92451 100644
--- a/plugins/inputs/cpu/README.md
+++ b/plugins/inputs/cpu/README.md
@@ -4,14 +4,15 @@ The `cpu` plugin gather metrics on the system CPUs.
 #### Configuration
 
 ```toml
+# Read metrics about cpu usage
 [[inputs.cpu]]
   ## Whether to report per-cpu stats or not
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## If true, collect raw CPU time metrics.
+  ## If true, collect raw CPU time metrics
   collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
+  ## If true, compute and report the sum of all non-idle CPU states
   report_active = false
 ```
 
diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go
index e073309e47e3b..9e795c82a589d 100644
--- a/plugins/inputs/cpu/cpu.go
+++ b/plugins/inputs/cpu/cpu.go
@@ -4,15 +4,16 @@ import (
 	"fmt"
 	"time"
 
+	cpuUtil "github.com/shirou/gopsutil/cpu"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/inputs/system"
-	"github.com/shirou/gopsutil/cpu"
 )
 
 type CPUStats struct {
 	ps system.PS
-	lastStats map[string]cpu.TimesStat
+	lastStats map[string]cpuUtil.TimesStat
 
 	PerCPU         bool `toml:"percpu"`
 	TotalCPU       bool `toml:"totalcpu"`
@@ -28,7 +29,7 @@ func NewCPUStats(ps system.PS) *CPUStats {
 	}
 }
 
-func (_ *CPUStats) Description() string {
+func (c *CPUStats) Description() string {
 	return "Read metrics about cpu usage"
 }
 
@@ -37,18 +38,18 @@ var sampleConfig = `
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## If true, collect raw CPU time metrics.
+  ## If true, collect raw CPU time metrics
   collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
+ ## If true, compute and report the sum of all non-idle CPU states report_active = false ` -func (_ *CPUStats) SampleConfig() string { +func (c *CPUStats) SampleConfig() string { return sampleConfig } -func (s *CPUStats) Gather(acc telegraf.Accumulator) error { - times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU) +func (c *CPUStats) Gather(acc telegraf.Accumulator) error { + times, err := c.ps.CPUTimes(c.PerCPU, c.TotalCPU) if err != nil { return fmt.Errorf("error getting CPU info: %s", err) } @@ -59,10 +60,10 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { "cpu": cts.CPU, } - total := totalCpuTime(cts) - active := activeCpuTime(cts) + total := totalCPUTime(cts) + active := activeCPUTime(cts) - if s.CollectCPUTime { + if c.CollectCPUTime { // Add cpu time metrics fieldsC := map[string]interface{}{ "time_user": cts.User, @@ -76,28 +77,28 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { "time_guest": cts.Guest, "time_guest_nice": cts.GuestNice, } - if s.ReportActive { - fieldsC["time_active"] = activeCpuTime(cts) + if c.ReportActive { + fieldsC["time_active"] = activeCPUTime(cts) } acc.AddCounter("cpu", fieldsC, tags, now) } // Add in percentage - if len(s.lastStats) == 0 { + if len(c.lastStats) == 0 { // If it's the 1st gather, can't get CPU Usage stats yet continue } - lastCts, ok := s.lastStats[cts.CPU] + lastCts, ok := c.lastStats[cts.CPU] if !ok { continue } - lastTotal := totalCpuTime(lastCts) - lastActive := activeCpuTime(lastCts) + lastTotal := totalCPUTime(lastCts) + lastActive := activeCPUTime(lastCts) totalDelta := total - lastTotal if totalDelta < 0 { - err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time") + err = fmt.Errorf("current total CPU time is less than previous total CPU time") break } @@ -117,28 +118,27 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { "usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta, "usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta, } - if s.ReportActive { + if c.ReportActive { fieldsG["usage_active"] = 100 * (active - lastActive) / totalDelta } acc.AddGauge("cpu", fieldsG, tags, now) } - s.lastStats = make(map[string]cpu.TimesStat) + c.lastStats = make(map[string]cpuUtil.TimesStat) for _, cts := range times { - s.lastStats[cts.CPU] = cts + c.lastStats[cts.CPU] = cts } return err } -func totalCpuTime(t cpu.TimesStat) float64 { - total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + - t.Idle +func totalCPUTime(t cpuUtil.TimesStat) float64 { + total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle return total } -func activeCpuTime(t cpu.TimesStat) float64 { - active := totalCpuTime(t) - t.Idle +func activeCPUTime(t cpuUtil.TimesStat) float64 { + active := totalCPUTime(t) - t.Idle return active } diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index bf356ec7b945c..e51660a0adee6 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -4,11 +4,11 @@ import ( "fmt" "testing" + cpuUtil "github.com/shirou/gopsutil/cpu" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestCPUStats(t *testing.T) { @@ -16,7 +16,7 @@ func TestCPUStats(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpu.TimesStat{ 
+ cts := cpuUtil.TimesStat{ CPU: "cpu0", User: 8.8, System: 8.2, @@ -30,7 +30,7 @@ func TestCPUStats(t *testing.T) { GuestNice: 0.324, } - cts2 := cpu.TimesStat{ + cts2 := cpuUtil.TimesStat{ CPU: "cpu0", User: 24.9, // increased by 16.1 System: 10.9, // increased by 2.7 @@ -44,62 +44,58 @@ func TestCPUStats(t *testing.T) { GuestNice: 2.524, // increased by 2.2 } - mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) cs := NewCPUStats(&mps) - cputags := map[string]string{ - "cpu": "cpu0", - } - err := cs.Gather(&acc) require.NoError(t, err) // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 19.9, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.8389, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.6, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 8.8, 0) + assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0) + assertContainsTaggedFloat(t, &acc, "time_active", 19.9, 0.0005) + assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.8389, 0) + assertContainsTaggedFloat(t, &acc, "time_irq", 0.6, 0) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0) + assertContainsTaggedFloat(t, &acc, "time_guest", 3.1, 0) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) cs.ps = &mps2 // Should have added cpu percentages too err = cs.Gather(&acc) require.NoError(t, err) - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 24.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 157.9798, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 42.0202, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 3.5, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.929, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 11.4, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags) - - assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 7.8, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 77.8798, 
0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_active", 22.1202, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 0, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.0901, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 0.6, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 8.3, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 24.9, 0) + assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 157.9798, 0) + assertContainsTaggedFloat(t, &acc, "time_active", 42.0202, 0.0005) + assertContainsTaggedFloat(t, &acc, "time_nice", 3.5, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.929, 0) + assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0) + assertContainsTaggedFloat(t, &acc, "time_guest", 11.4, 0) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0) + + assertContainsTaggedFloat(t, &acc, "usage_user", 7.8, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_idle", 77.8798, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_active", 22.1202, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_nice", 0, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.0901, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_irq", 0.6, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_guest", 8.3, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005) } // Asserts that a given accumulator contains a measurement of type float64 with @@ -109,24 +105,21 @@ func TestCPUStats(t *testing.T) { // Parameters: // t *testing.T : Testing object to use // acc testutil.Accumulator: Accumulator to examine -// measurement string : Name of the measurement to examine +// field string : Name of field to examine // expectedValue float64 : Value to search for within the measurement // delta float64 : Maximum acceptable distance of an accumulated value // from the expectedValue parameter. Useful when // floating-point arithmetic imprecision makes looking // for an exact match impractical -// tags map[string]string : Tag set the found measurement must have. Set to nil to -// ignore the tag set. 
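+// Note: the measurement is always "cpu" for these assertions; only the field
+// name varies between calls.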
func assertContainsTaggedFloat( t *testing.T, acc *testutil.Accumulator, - measurement string, field string, expectedValue float64, delta float64, - tags map[string]string, ) { var actualValue float64 + measurement := "cpu" // always cpu for _, pt := range acc.Metrics { if pt.Measurement == measurement { for fieldname, value := range pt.Fields { @@ -138,8 +131,7 @@ func assertContainsTaggedFloat( return } } else { - assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", - measurement)) + require.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", measurement)) } } } @@ -148,7 +140,7 @@ func assertContainsTaggedFloat( msg := fmt.Sprintf( "Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", measurement, delta, expectedValue, actualValue) - assert.Fail(t, msg) + require.Fail(t, msg) } // TestCPUCountChange tests that no errors are encountered if the number of @@ -162,7 +154,7 @@ func TestCPUCountIncrease(t *testing.T) { cs := NewCPUStats(&mps) mps.On("CPUTimes").Return( - []cpu.TimesStat{ + []cpuUtil.TimesStat{ { CPU: "cpu0", }, @@ -172,7 +164,7 @@ func TestCPUCountIncrease(t *testing.T) { require.NoError(t, err) mps2.On("CPUTimes").Return( - []cpu.TimesStat{ + []cpuUtil.TimesStat{ { CPU: "cpu0", }, @@ -193,46 +185,42 @@ func TestCPUTimesDecrease(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpu.TimesStat{ + cts := cpuUtil.TimesStat{ CPU: "cpu0", User: 18, Idle: 80, Iowait: 2, } - cts2 := cpu.TimesStat{ + cts2 := cpuUtil.TimesStat{ CPU: "cpu0", User: 38, // increased by 20 Idle: 40, // decreased by 40 Iowait: 1, // decreased by 1 } - cts3 := cpu.TimesStat{ + cts3 := cpuUtil.TimesStat{ CPU: "cpu0", User: 56, // increased by 18 Idle: 120, // increased by 80 Iowait: 3, // increased by 2 } - mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) cs := NewCPUStats(&mps) - cputags := map[string]string{ - "cpu": "cpu0", - } - err := cs.Gather(&acc) require.NoError(t, err) // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 18, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 80, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) cs.ps = &mps2 // CPU times decreased. 
An error should be raised
@@ -240,17 +228,17 @@ func TestCPUTimesDecrease(t *testing.T) {
 	require.Error(t, err)
 
 	mps3 := system.MockPS{}
-	mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil)
+	mps3.On("CPUTimes").Return([]cpuUtil.TimesStat{cts3}, nil)
 	cs.ps = &mps3
 
 	err = cs.Gather(&acc)
 	require.NoError(t, err)
 
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_user", 56, 0)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 120, 0)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 3, 0)
 
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_user", 18, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_idle", 80, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_iowait", 2, 0.0005)
 }
diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md
new file mode 100644
index 0000000000000..b335509400426
--- /dev/null
+++ b/plugins/inputs/csgo/README.md
@@ -0,0 +1,38 @@
+# Counter-Strike: Global Offensive (CSGO) Input Plugin
+
+The `csgo` plugin gathers metrics from Counter-Strike: Global Offensive servers.
+
+### Configuration
+
+```toml
+# Fetch metrics from a CSGO SRCDS
+[[inputs.csgo]]
+  ## Specify servers using the following format:
+  ##    servers = [
+  ##      ["ip1:port1", "rcon_password1"],
+  ##      ["ip2:port2", "rcon_password2"],
+  ##    ]
+  #
+  ## If no servers are specified, no data will be collected
+  servers = []
+```
+
+### Metrics
+
+The plugin retrieves the output of the `stats` command that is executed via rcon.
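+
+A representative raw response (this sample mirrors the fixture used in this
+plugin's tests; the exact columns and spacing may vary by server version)
+looks like:
+
+```text
+CPU   NetIn   NetOut    Uptime  Maps   FPS   Players  Svms    +-ms   ~tick
+10.0  1.2     3.4       100     1      120.20 15      5.23    0.01   0.02
+```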
+ +If no servers are specified, no data will be collected + +- csgo + - tags: + - host + - fields: + - cpu (float) + - net_in (float) + - net_out (float) + - uptime_minutes (float) + - maps (float) + - fps (float) + - players (float) + - sv_ms (float) + - variance_ms (float) + - tick_ms (float) diff --git a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go new file mode 100644 index 0000000000000..59d1110ad08a5 --- /dev/null +++ b/plugins/inputs/csgo/csgo.go @@ -0,0 +1,193 @@ +package csgo + +import ( + "encoding/json" + "errors" + "strconv" + "strings" + "sync" + "time" + + "github.com/james4k/rcon" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type statsData struct { + CPU float64 `json:"cpu"` + NetIn float64 `json:"net_in"` + NetOut float64 `json:"net_out"` + UptimeMinutes float64 `json:"uptime_minutes"` + Maps float64 `json:"maps"` + FPS float64 `json:"fps"` + Players float64 `json:"players"` + Sim float64 `json:"sv_ms"` + Variance float64 `json:"variance_ms"` + Tick float64 `json:"tick_ms"` +} + +type CSGO struct { + Servers [][]string `toml:"servers"` +} + +func (*CSGO) Description() string { + return "Fetch metrics from a CSGO SRCDS" +} + +var sampleConfig = ` + ## Specify servers using the following format: + ## servers = [ + ## ["ip1:port1", "rcon_password1"], + ## ["ip2:port2", "rcon_password2"], + ## ] + # + ## If no servers are specified, no data will be collected + servers = [] +` + +func (*CSGO) SampleConfig() string { + return sampleConfig +} + +func (s *CSGO) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + // Loop through each server and collect metrics + for _, server := range s.Servers { + wg.Add(1) + go func(ss []string) { + defer wg.Done() + acc.AddError(s.gatherServer(acc, ss, requestServer)) + }(server) + } + + wg.Wait() + return nil +} + +func init() { + inputs.Add("csgo", func() telegraf.Input { + return &CSGO{} + }) +} + +func (s *CSGO) gatherServer( + acc telegraf.Accumulator, + server []string, + request func(string, string) (string, error), +) error { + if len(server) != 2 { + return errors.New("incorrect server config") + } + + url, rconPw := server[0], server[1] + resp, err := request(url, rconPw) + if err != nil { + return err + } + + rows := strings.Split(resp, "\n") + if len(rows) < 2 { + return errors.New("bad response") + } + + fields := strings.Fields(rows[1]) + if len(fields) != 10 { + return errors.New("bad response") + } + + cpu, err := strconv.ParseFloat(fields[0], 32) + if err != nil { + return err + } + netIn, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return err + } + netOut, err := strconv.ParseFloat(fields[2], 64) + if err != nil { + return err + } + uptimeMinutes, err := strconv.ParseFloat(fields[3], 64) + if err != nil { + return err + } + maps, err := strconv.ParseFloat(fields[4], 64) + if err != nil { + return err + } + fps, err := strconv.ParseFloat(fields[5], 64) + if err != nil { + return err + } + players, err := strconv.ParseFloat(fields[6], 64) + if err != nil { + return err + } + svms, err := strconv.ParseFloat(fields[7], 64) + if err != nil { + return err + } + msVar, err := strconv.ParseFloat(fields[8], 64) + if err != nil { + return err + } + tick, err := strconv.ParseFloat(fields[9], 64) + if err != nil { + return err + } + + now := time.Now() + stats := statsData{ + CPU: cpu, + NetIn: netIn, + NetOut: netOut, + UptimeMinutes: uptimeMinutes, + Maps: maps, + FPS: fps, + Players: players, + Sim: svms, + Variance: msVar, + Tick: tick, + } + 
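+	// The statsData struct is flattened into a field map below by
+	// round-tripping through encoding/json; the struct's json tags double
+	// as the Telegraf field names.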
+ tags := map[string]string{ + "host": url, + } + + var statsMap map[string]interface{} + marshalled, err := json.Marshal(stats) + if err != nil { + return err + } + err = json.Unmarshal(marshalled, &statsMap) + if err != nil { + return err + } + + acc.AddGauge("csgo", statsMap, tags, now) + return nil +} + +func requestServer(url string, rconPw string) (string, error) { + remoteConsole, err := rcon.Dial(url, rconPw) + if err != nil { + return "", err + } + defer remoteConsole.Close() + + reqID, err := remoteConsole.Write("stats") + if err != nil { + return "", err + } + + resp, respReqID, err := remoteConsole.Read() + if err != nil { + return "", err + } else if reqID != respReqID { + return "", errors.New("response/request mismatch") + } else { + return resp, nil + } +} diff --git a/plugins/inputs/csgo/csgo_test.go b/plugins/inputs/csgo/csgo_test.go new file mode 100644 index 0000000000000..b1d1c9b693814 --- /dev/null +++ b/plugins/inputs/csgo/csgo_test.go @@ -0,0 +1,54 @@ +package csgo + +import ( + "github.com/influxdata/telegraf/testutil" + "testing" + + "github.com/stretchr/testify/assert" +) + +const testInput = `CPU NetIn NetOut Uptime Maps FPS Players Svms +-ms ~tick +10.0 1.2 3.4 100 1 120.20 15 5.23 0.01 0.02` + +var ( + expectedOutput = statsData{ + 10.0, 1.2, 3.4, 100.0, 1, 120.20, 15, 5.23, 0.01, 0.02, + } +) + +func TestCPUStats(t *testing.T) { + c := NewCSGOStats() + var acc testutil.Accumulator + err := c.gatherServer(&acc, c.Servers[0], requestMock) + if err != nil { + t.Error(err) + } + + if !acc.HasMeasurement("csgo") { + t.Errorf("acc.HasMeasurement: expected csgo") + } + + assert.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) + assert.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) + assert.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) + assert.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) + assert.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) + assert.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) + assert.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) + assert.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) + assert.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) + assert.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) + assert.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) +} + +func requestMock(_ string, _ string) (string, error) { + return testInput, nil +} + +func NewCSGOStats() *CSGO { + return &CSGO{ + Servers: [][]string{ + {"1.2.3.4:1234", "password"}, + }, + } +} diff --git a/plugins/inputs/dcos/README.md b/plugins/inputs/dcos/README.md index 790590aeaf94b..4c9d46a921a6b 100644 --- a/plugins/inputs/dcos/README.md +++ b/plugins/inputs/dcos/README.md @@ -13,9 +13,6 @@ your database. options to exclude unneeded tags. - Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/). -- Limit series cardinality in your database using the - [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and - [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings. - Consider using the [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/). 
- Monitor your databases diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index c7561ee359d5a..08943d13db0f9 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/dgrijalva/jwt-go" + jwt "github.com/golang-jwt/jwt/v4" ) const ( @@ -92,11 +92,10 @@ type AuthToken struct { // ClusterClient is a Client that uses the cluster URL. type ClusterClient struct { - clusterURL *url.URL - httpClient *http.Client - credentials *Credentials - token string - semaphore chan struct{} + clusterURL *url.URL + httpClient *http.Client + token string + semaphore chan struct{} } type claims struct { @@ -157,7 +156,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok return nil, err } - loc := c.url("/acs/api/v1/auth/login") + loc := c.toURL("/acs/api/v1/auth/login") req, err := http.NewRequest("POST", loc, bytes.NewBuffer(octets)) if err != nil { return nil, err @@ -209,7 +208,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) { summary := &Summary{} - err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary) + err := c.doGet(ctx, c.toURL("/mesos/master/state-summary"), summary) if err != nil { return nil, err } @@ -221,7 +220,7 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta list := []string{} path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node) - err := c.doGet(ctx, c.url(path), &list) + err := c.doGet(ctx, c.toURL(path), &list) if err != nil { return nil, err } @@ -229,16 +228,15 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta containers := make([]Container, 0, len(list)) for _, c := range list { containers = append(containers, Container{ID: c}) - } return containers, nil } -func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) { +func (c *ClusterClient) getMetrics(ctx context.Context, address string) (*Metrics, error) { metrics := &Metrics{} - err := c.doGet(ctx, url, metrics) + err := c.doGet(ctx, address, metrics) if err != nil { return nil, err } @@ -248,21 +246,21 @@ func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, e func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } -func createGetRequest(url string, token string) (*http.Request, error) { - req, err := http.NewRequest("GET", url, nil) +func createGetRequest(address string, token string) (*http.Request, error) { + req, err := http.NewRequest("GET", address, nil) if err != nil { return nil, err } @@ -275,8 +273,8 @@ func createGetRequest(url string, token string) (*http.Request, error) { return req, nil } -func (c 
*ClusterClient) doGet(ctx context.Context, url string, v interface{}) error { - req, err := createGetRequest(url, c.token) +func (c *ClusterClient) doGet(ctx context.Context, address string, v interface{}) error { + req, err := createGetRequest(address, c.token) if err != nil { return err } @@ -294,6 +292,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er return err } defer func() { + //nolint:errcheck,revive // we cannot do anything if the closing fails resp.Body.Close() <-c.semaphore }() @@ -305,7 +304,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er if resp.StatusCode < 200 || resp.StatusCode >= 300 { return &APIError{ - URL: url, + URL: address, StatusCode: resp.StatusCode, Title: resp.Status, } @@ -319,10 +318,10 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er return err } -func (c *ClusterClient) url(path string) string { - url := *c.clusterURL - url.Path = path - return url.String() +func (c *ClusterClient) toURL(path string) string { + clusterURL := *c.clusterURL + clusterURL.Path = path + return clusterURL.String() } func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { @@ -330,7 +329,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { UID: sa.AccountID, StandardClaims: jwt.StandardClaims{ // How long we have to login with this token - ExpiresAt: time.Now().Add(5 * time.Minute).Unix(), + ExpiresAt: time.Now().Add(time.Minute * 5).Unix(), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index 7d154a43e09f1..70cf9ce7cfccd 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - jwt "github.com/dgrijalva/jwt-go" + jwt "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -142,7 +142,6 @@ func TestGetSummary(t *testing.T) { require.Equal(t, tt.expectedValue, summary) }) } - } func TestGetNodeMetrics(t *testing.T) { @@ -184,7 +183,6 @@ func TestGetNodeMetrics(t *testing.T) { require.Equal(t, tt.expectedValue, m) }) } - } func TestGetContainerMetrics(t *testing.T) { @@ -226,5 +224,4 @@ func TestGetContainerMetrics(t *testing.T) { require.Equal(t, tt.expectedValue, m) }) } - } diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 0178315bb7076..328ce394a4cf6 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rsa" "fmt" - "io/ioutil" + "os" "strings" "time" "unicode/utf8" @@ -47,13 +47,13 @@ func (c *ServiceAccount) IsExpired() bool { return c.auth.Text != "" || c.auth.Expire.Add(relogDuration).After(time.Now()) } -func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) { - octets, err := ioutil.ReadFile(c.Path) +func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) { + octets, err := os.ReadFile(c.Path) if err != nil { - return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err) + return "", fmt.Errorf("error reading token file %q: %s", c.Path, err) } if !utf8.Valid(octets) { - return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path) + return "", fmt.Errorf("token file does not contain utf-8 encoded text: %s", c.Path) } token := strings.TrimSpace(string(octets)) return token, nil @@ -63,7 +63,7 @@ func (c *TokenCreds) IsExpired() bool { 
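	// Token credentials carry no expiry of their own: always report expired
	// so the token file is re-read before each use.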
return true } -func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) { +func (c *NullCreds) Token(_ context.Context, _ Client) (string, error) { return "", nil } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index d74c792494378..dd8f22f7292f5 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -2,17 +2,18 @@ package dcos import ( "context" - "io/ioutil" "net/url" + "os" "sort" "strings" "sync" "time" - jwt "github.com/dgrijalva/jwt-go" + jwt "github.com/golang-jwt/jwt/v4" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -56,7 +57,7 @@ type DCOS struct { AppExclude []string MaxConnections int - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client Client @@ -66,7 +67,6 @@ type DCOS struct { nodeFilter filter.Filter containerFilter filter.Filter appFilter filter.Filter - taskNameFilter filter.Filter } func (d *DCOS) Description() string { @@ -223,7 +223,7 @@ type point struct { fields map[string]interface{} } -func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { +func (d *DCOS) createPoints(m *Metrics) []*point { points := make(map[string]*point) for _, dp := range m.Datapoints { fieldKey := strings.Replace(dp.Name, ".", "_", -1) @@ -288,7 +288,7 @@ func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) { tm := time.Now() - points := d.createPoints(acc, m) + points := d.createPoints(m) for _, p := range points { tags := make(map[string]string) @@ -353,14 +353,14 @@ func (d *DCOS) createClient() (Client, error) { return nil, err } - url, err := url.Parse(d.ClusterURL) + address, err := url.Parse(d.ClusterURL) if err != nil { return nil, err } client := NewClusterClient( - url, - d.ResponseTimeout.Duration, + address, + time.Duration(d.ResponseTimeout), d.MaxConnections, tlsCfg, ) @@ -370,7 +370,7 @@ func (d *DCOS) createClient() (Client, error) { func (d *DCOS) createCredentials() (Credentials, error) { if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" { - bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey) + bs, err := os.ReadFile(d.ServiceAccountPrivateKey) if err != nil { return nil, err } @@ -422,10 +422,8 @@ func (d *DCOS) createFilters() error { func init() { inputs.Add("dcos", func() telegraf.Input { return &DCOS{ - MaxConnections: defaultMaxConnections, - ResponseTimeout: internal.Duration{ - Duration: defaultResponseTimeout, - }, + MaxConnections: defaultMaxConnections, + ResponseTimeout: config.Duration(defaultResponseTimeout), } }) } diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 3914fa5777714..828fd0af647ab 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -203,7 +203,6 @@ func TestAddNodeMetrics(t *testing.T) { } }) } - } func TestAddContainerMetrics(t *testing.T) { diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md new file mode 100644 index 0000000000000..66d9eb51fce79 --- /dev/null +++ b/plugins/inputs/directory_monitor/README.md @@ -0,0 +1,48 @@ +# Directory Monitor Input Plugin + +This plugin monitors a single directory (without looking at 
sub-directories) and ingests each file placed in it.
+The plugin gathers all files in the directory at a configurable interval (`monitor_interval`) and parses the ones that haven't been picked up yet.
+
+This plugin is intended to read files that are moved or copied to the monitored directory; files should not be in use by another process, or they may fail to be gathered. Note that the plugin picks a file up only after it has sat in the directory for the configurable `directory_duration_threshold`, so files should not be written 'live' to the monitored directory. If you absolutely must write files directly, they must be guaranteed to finish writing before the `directory_duration_threshold` elapses.
+
+### Configuration:
+
+```toml
+[[inputs.directory_monitor]]
+  ## The directory to monitor and read files from.
+  directory = ""
+  #
+  ## The directory to move finished files to.
+  finished_directory = ""
+  #
+  ## The directory to move files to upon file error.
+  ## If not provided, erroring files will stay in the monitored directory.
+  # error_directory = ""
+  #
+  ## The amount of time a file is allowed to sit in the directory before it is picked up.
+  ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow,
+  ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+  # directory_duration_threshold = "50ms"
+  #
+  ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+  # files_to_monitor = ["^.*\.csv"]
+  #
+  ## A list of files to ignore, if necessary. Supports regex.
+  # files_to_ignore = [".DS_Store"]
+  #
+  ## Maximum lines of the file to process that have not yet been written by the
+  ## output. For best throughput set to the size of the output's metric_buffer_limit.
+  ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+  # max_buffered_metrics = 10000
+  #
+  ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+  ## Lowering this value will result in *slightly* less memory use, at a potential cost in processing speed.
+  # file_queue_size = 100000
+  #
+  ## The data format to be read from the files.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  ## NOTE: We currently only support parsing newline-delimited JSON.
+  ## See the format here: https://github.com/ndjson/ndjson-spec
+  data_format = "influx"
+```
diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go
new file mode 100644
index 0000000000000..a58c039422757
--- /dev/null
+++ b/plugins/inputs/directory_monitor/directory_monitor.go
@@ -0,0 +1,413 @@
+package directory_monitor
+
+import (
+	"bufio"
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/semaphore"
+	"gopkg.in/djherbis/times.v1"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/plugins/parsers/csv"
+	"github.com/influxdata/telegraf/selfstat"
+)
+
+const sampleConfig = `
+  ## The directory to monitor and read files from.
+  directory = ""
+  #
+  ## The directory to move finished files to.
+  finished_directory = ""
+  #
+  ## The directory to move files to upon file error.
+  ## If not provided, erroring files will stay in the monitored directory.
+  # error_directory = ""
+  #
+  ## The amount of time a file is allowed to sit in the directory before it is picked up.
+  ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow,
+  ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+  # directory_duration_threshold = "50ms"
+  #
+  ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+  # files_to_monitor = ["^.*\.csv"]
+  #
+  ## A list of files to ignore, if necessary. Supports regex.
+  # files_to_ignore = [".DS_Store"]
+  #
+  ## Maximum lines of the file to process that have not yet been written by the
+  ## output. For best throughput set to the size of the output's metric_buffer_limit.
+  ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+  # max_buffered_metrics = 10000
+  #
+  ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+  ## Lowering this value will result in *slightly* less memory use, at a potential cost in processing speed.
+  # file_queue_size = 100000
+  #
+  ## The data format to be read from the files.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  ## NOTE: We currently only support parsing newline-delimited JSON.
See the format here: https://github.com/ndjson/ndjson-spec + data_format = "influx" +` + +var ( + defaultFilesToMonitor = []string{} + defaultFilesToIgnore = []string{} + defaultMaxBufferedMetrics = 10000 + defaultDirectoryDurationThreshold = config.Duration(0 * time.Millisecond) + defaultFileQueueSize = 100000 +) + +type DirectoryMonitor struct { + Directory string `toml:"directory"` + FinishedDirectory string `toml:"finished_directory"` + ErrorDirectory string `toml:"error_directory"` + + FilesToMonitor []string `toml:"files_to_monitor"` + FilesToIgnore []string `toml:"files_to_ignore"` + MaxBufferedMetrics int `toml:"max_buffered_metrics"` + DirectoryDurationThreshold config.Duration `toml:"directory_duration_threshold"` + Log telegraf.Logger `toml:"-"` + FileQueueSize int `toml:"file_queue_size"` + + filesInUse sync.Map + cancel context.CancelFunc + context context.Context + parserFunc parsers.ParserFunc + filesProcessed selfstat.Stat + filesDropped selfstat.Stat + waitGroup *sync.WaitGroup + acc telegraf.TrackingAccumulator + sem *semaphore.Weighted + fileRegexesToMatch []*regexp.Regexp + fileRegexesToIgnore []*regexp.Regexp + filesToProcess chan string +} + +func (monitor *DirectoryMonitor) SampleConfig() string { + return sampleConfig +} + +func (monitor *DirectoryMonitor) Description() string { + return "Ingests files in a directory and then moves them to a target directory." +} + +func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { + // Get all files sitting in the directory. + files, err := os.ReadDir(monitor.Directory) + if err != nil { + return fmt.Errorf("unable to monitor the targeted directory: %w", err) + } + + for _, file := range files { + filePath := monitor.Directory + "/" + file.Name() + + // We've been cancelled via Stop(). + if monitor.context.Err() != nil { + //nolint:nilerr // context cancelation is not an error + return nil + } + + stat, err := times.Stat(filePath) + if err != nil { + continue + } + + timeThresholdExceeded := time.Since(stat.AccessTime()) >= time.Duration(monitor.DirectoryDurationThreshold) + + // If file is decaying, process it. + if timeThresholdExceeded { + monitor.processFile(file) + } + } + + return nil +} + +func (monitor *DirectoryMonitor) Start(acc telegraf.Accumulator) error { + // Use tracking to determine when more metrics can be added without overflowing the outputs. + monitor.acc = acc.WithTracking(monitor.MaxBufferedMetrics) + go func() { + for range monitor.acc.Delivered() { + monitor.sem.Release(1) + } + }() + + // Monitor the files channel and read what they receive. + monitor.waitGroup.Add(1) + go func() { + monitor.Monitor() + monitor.waitGroup.Done() + }() + + return nil +} + +func (monitor *DirectoryMonitor) Stop() { + // Before stopping, wrap up all file-reading routines. + monitor.cancel() + close(monitor.filesToProcess) + monitor.Log.Warnf("Exiting the Directory Monitor plugin. Waiting to quit until all current files are finished.") + monitor.waitGroup.Wait() +} + +func (monitor *DirectoryMonitor) Monitor() { + for filePath := range monitor.filesToProcess { + if monitor.context.Err() != nil { + return + } + + // Prevent goroutines from taking the same file as another. + if _, exists := monitor.filesInUse.LoadOrStore(filePath, true); exists { + continue + } + + monitor.read(filePath) + + // We've finished reading the file and moved it away, delete it from files in use. 
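+	// A file with the same path can then be picked up again later.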
+ monitor.filesInUse.Delete(filePath) + } +} + +func (monitor *DirectoryMonitor) processFile(file os.DirEntry) { + if file.IsDir() { + return + } + + filePath := monitor.Directory + "/" + file.Name() + + // File must be configured to be monitored, if any configuration... + if !monitor.isMonitoredFile(file.Name()) { + return + } + + // ...and should not be configured to be ignored. + if monitor.isIgnoredFile(file.Name()) { + return + } + + select { + case monitor.filesToProcess <- filePath: + default: + } +} + +func (monitor *DirectoryMonitor) read(filePath string) { + // Open, read, and parse the contents of the file. + err := monitor.ingestFile(filePath) + if _, isPathError := err.(*os.PathError); isPathError { + return + } + + // Handle a file read error. We don't halt execution but do document, log, and move the problematic file. + if err != nil { + monitor.Log.Errorf("Error while reading file: '" + filePath + "'. " + err.Error()) + monitor.filesDropped.Incr(1) + if monitor.ErrorDirectory != "" { + monitor.moveFile(filePath, monitor.ErrorDirectory) + } + return + } + + // File is finished, move it to the 'finished' directory. + monitor.moveFile(filePath, monitor.FinishedDirectory) + monitor.filesProcessed.Incr(1) +} + +func (monitor *DirectoryMonitor) ingestFile(filePath string) error { + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + + parser, err := monitor.parserFunc() + if err != nil { + return fmt.Errorf("E! Creating parser: %s", err.Error()) + } + + // Handle gzipped files. + var reader io.Reader + if filepath.Ext(filePath) == ".gz" { + reader, err = gzip.NewReader(file) + if err != nil { + return err + } + } else { + reader = file + } + + return monitor.parseFile(parser, reader) +} + +func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader) error { + // Read the file line-by-line and parse with the configured parse method. + firstLine := true + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + metrics, err := monitor.parseLine(parser, scanner.Bytes(), firstLine) + if err != nil { + return err + } + firstLine = false + + if err := monitor.sendMetrics(metrics); err != nil { + return err + } + } + + return nil +} + +func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte, firstLine bool) ([]telegraf.Metric, error) { + switch parser.(type) { + case *csv.Parser: + // The CSV parser parses headers in Parse and skips them in ParseLine. + if firstLine { + return parser.Parse(line) + } + + m, err := parser.ParseLine(string(line)) + if err != nil { + return nil, err + } + + if m != nil { + return []telegraf.Metric{m}, nil + } + + return []telegraf.Metric{}, nil + default: + return parser.Parse(line) + } +} + +func (monitor *DirectoryMonitor) sendMetrics(metrics []telegraf.Metric) error { + // Report the metrics for the file. + for _, m := range metrics { + // Block until metric can be written. + if err := monitor.sem.Acquire(monitor.context, 1); err != nil { + return err + } + monitor.acc.AddTrackingMetricGroup([]telegraf.Metric{m}) + } + return nil +} + +func (monitor *DirectoryMonitor) moveFile(filePath string, directory string) { + err := os.Rename(filePath, directory+"/"+filepath.Base(filePath)) + + if err != nil { + monitor.Log.Errorf("Error while moving file '" + filePath + "' to another directory. 
Error: " + err.Error()) + } +} + +func (monitor *DirectoryMonitor) isMonitoredFile(fileName string) bool { + if len(monitor.fileRegexesToMatch) == 0 { + return true + } + + // Only monitor matching files. + for _, regex := range monitor.fileRegexesToMatch { + if regex.MatchString(fileName) { + return true + } + } + + return false +} + +func (monitor *DirectoryMonitor) isIgnoredFile(fileName string) bool { + // Skip files that are set to be ignored. + for _, regex := range monitor.fileRegexesToIgnore { + if regex.MatchString(fileName) { + return true + } + } + + return false +} + +func (monitor *DirectoryMonitor) SetParserFunc(fn parsers.ParserFunc) { + monitor.parserFunc = fn +} + +func (monitor *DirectoryMonitor) Init() error { + if monitor.Directory == "" || monitor.FinishedDirectory == "" { + return errors.New("missing one of the following required config options: directory, finished_directory") + } + + if monitor.FileQueueSize <= 0 { + return errors.New("file queue size needs to be more than 0") + } + + // Finished directory can be created if not exists for convenience. + if _, err := os.Stat(monitor.FinishedDirectory); os.IsNotExist(err) { + err = os.Mkdir(monitor.FinishedDirectory, 0777) + if err != nil { + return err + } + } + + monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", map[string]string{}) + monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", map[string]string{}) + + // If an error directory should be used but has not been configured yet, create one ourselves. + if monitor.ErrorDirectory != "" { + if _, err := os.Stat(monitor.ErrorDirectory); os.IsNotExist(err) { + err := os.Mkdir(monitor.ErrorDirectory, 0777) + if err != nil { + return err + } + } + } + + monitor.waitGroup = &sync.WaitGroup{} + monitor.sem = semaphore.NewWeighted(int64(monitor.MaxBufferedMetrics)) + monitor.context, monitor.cancel = context.WithCancel(context.Background()) + monitor.filesToProcess = make(chan string, monitor.FileQueueSize) + + // Establish file matching / exclusion regexes. 
+ for _, matcher := range monitor.FilesToMonitor { + regex, err := regexp.Compile(matcher) + if err != nil { + return err + } + monitor.fileRegexesToMatch = append(monitor.fileRegexesToMatch, regex) + } + + for _, matcher := range monitor.FilesToIgnore { + regex, err := regexp.Compile(matcher) + if err != nil { + return err + } + monitor.fileRegexesToIgnore = append(monitor.fileRegexesToIgnore, regex) + } + + return nil +} + +func init() { + inputs.Add("directory_monitor", func() telegraf.Input { + return &DirectoryMonitor{ + FilesToMonitor: defaultFilesToMonitor, + FilesToIgnore: defaultFilesToIgnore, + MaxBufferedMetrics: defaultMaxBufferedMetrics, + DirectoryDurationThreshold: defaultDirectoryDurationThreshold, + FileQueueSize: defaultFileQueueSize, + } + }) +} diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go new file mode 100644 index 0000000000000..7cda5f2d7b639 --- /dev/null +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -0,0 +1,136 @@ +package directory_monitor + +import ( + "bytes" + "compress/gzip" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" +) + +func TestCSVGZImport(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + testCsvGzFile := "test.csv.gz" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 1, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString("thing,color\nsky,blue\ngrass,green\nclifford,red\n") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Write csv.gz file to process into the 'process' directory. + var b bytes.Buffer + w := gzip.NewWriter(&b) + _, err = w.Write([]byte("thing,color\nsky,blue\ngrass,green\nclifford,red\n")) + require.NoError(t, err) + err = w.Close() + require.NoError(t, err) + err = os.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(6) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 6) + + // File should have gone back to the test directory, as we configured. 
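+	// (i.e. both files should now exist in the configured finished directory).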
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvGzFile)) + require.NoError(t, err) +} + +func TestMultipleJSONFileImports(t *testing.T) { + acc := testutil.Accumulator{} + testJSONFile := "test.json" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 1000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "json", + JSONNameKey: "Name", + } + + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + + // Let's drop a 5-line LINE-DELIMITED json. + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testJSONFile)) + require.NoError(t, err) + _, err = f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + err = r.Start(&acc) + r.Log = testutil.Logger{} + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(5) + r.Stop() + + // Verify that we read each JSON line once to a single metric. 
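+	// (five newline-delimited JSON objects yield five metrics).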
+ require.Equal(t, len(acc.Metrics), 5) +} diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index b2c7e540038bb..0a0fbf6f728a3 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -13,13 +13,13 @@ type DiskStats struct { ps system.PS // Legacy support - Mountpoints []string `toml:"mountpoints"` + LegacyMountPoints []string `toml:"mountpoints"` MountPoints []string `toml:"mount_points"` IgnoreFS []string `toml:"ignore_fs"` } -func (_ *DiskStats) Description() string { +func (ds *DiskStats) Description() string { return "Read metrics about disk usage by mount point" } @@ -32,17 +32,17 @@ var diskSampleConfig = ` ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ` -func (_ *DiskStats) SampleConfig() string { +func (ds *DiskStats) SampleConfig() string { return diskSampleConfig } -func (s *DiskStats) Gather(acc telegraf.Accumulator) error { +func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { // Legacy support: - if len(s.Mountpoints) != 0 { - s.MountPoints = s.Mountpoints + if len(ds.LegacyMountPoints) != 0 { + ds.MountPoints = ds.LegacyMountPoints } - disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS) + disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreFS) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } @@ -59,9 +59,9 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { "fstype": du.Fstype, "mode": mountOpts.Mode(), } - var used_percent float64 + var usedPercent float64 if du.Used+du.Free > 0 { - used_percent = float64(du.Used) / + usedPercent = float64(du.Used) / (float64(du.Used) + float64(du.Free)) * 100 } @@ -69,7 +69,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { "total": du.Total, "free": du.Free, "used": du.Used, - "used_percent": used_percent, + "used_percent": usedPercent, "inodes_total": du.InodesTotal, "inodes_free": du.InodesFree, "inodes_used": du.InodesUsed, diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index aeb2ae92bd77f..47a822b4410bf 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -1,15 +1,16 @@ package disk import ( + "fmt" "os" "testing" - "github.com/influxdata/telegraf/plugins/inputs/system" - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/disk" - "github.com/stretchr/testify/assert" + diskUtil "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/system" + "github.com/influxdata/telegraf/testutil" ) type MockFileInfo struct { @@ -24,7 +25,7 @@ func TestDiskUsage(t *testing.T) { var acc testutil.Accumulator var err error - psAll := []disk.PartitionStat{ + psAll := []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -38,7 +39,7 @@ func TestDiskUsage(t *testing.T) { Opts: "rw,noatime,nodiratime,errors=remount-ro", }, } - duAll := []disk.UsageStat{ + duAll := []diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -71,16 +72,16 @@ func TestDiskUsage(t *testing.T) { numDiskMetrics := acc.NFields() expectedAllDiskMetrics := 14 - assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "fstype": "ext4", "device": "sda", "mode": "ro", } tags2 := map[string]string{ - "path": "/home", + "path": fmt.Sprintf("%chome", os.PathSeparator), "fstype": 
"ext4", "device": "sdb", "mode": "rw", @@ -110,26 +111,28 @@ func TestDiskUsage(t *testing.T) { // We expect 6 more DiskMetrics to show up with an explicit match on "/" // and /home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both // / and /home err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) } func TestDiskUsageHostMountPrefix(t *testing.T) { tests := []struct { name string - partitionStats []disk.PartitionStat - usageStats []*disk.UsageStat + partitionStats []diskUtil.PartitionStat + usageStats []*diskUtil.UsageStat hostMountPrefix string expectedTags map[string]string expectedFields map[string]interface{} }{ { name: "no host mount prefix", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -137,14 +140,14 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: "ro", }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/", Total: 42, }, }, expectedTags: map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -161,7 +164,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs/var", @@ -169,7 +172,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: "ro", }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/hostfs/var", Total: 42, @@ -177,7 +180,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, hostMountPrefix: "/hostfs", expectedTags: map[string]string{ - "path": "/var", + "path": fmt.Sprintf("%cvar", os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -194,7 +197,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix exact match", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs", @@ -202,7 +205,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: "ro", }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/hostfs", Total: 42, @@ -210,7 +213,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, hostMountPrefix: "/hostfs", expectedTags: map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -258,7 +261,7 @@ func TestDiskStats(t *testing.T) { var acc testutil.Accumulator var err error - duAll := []*disk.UsageStat{ + duAll := []*diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -280,7 +283,7 @@ func TestDiskStats(t *testing.T) { InodesUsed: 2000, }, } - duFiltered := []*disk.UsageStat{ + duFiltered := []*diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -293,7 +296,7 @@ func TestDiskStats(t *testing.T) { }, } - psAll := []*disk.PartitionStat{ + psAll := []*diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -308,7 +311,7 @@ func TestDiskStats(t *testing.T) { }, } - psFiltered := []*disk.PartitionStat{ + psFiltered := 
[]*diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -326,7 +329,7 @@ func TestDiskStats(t *testing.T) { numDiskMetrics := acc.NFields() expectedAllDiskMetrics := 14 - assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ "path": "/", @@ -365,10 +368,12 @@ func TestDiskStats(t *testing.T) { // We expect 6 more DiskMetrics to show up with an explicit match on "/" // and /home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both // / and /home err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) } diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 9c1e20ebdc5de..9458b2af7a68f 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -30,7 +30,7 @@ type DiskIO struct { initialized bool } -func (_ *DiskIO) Description() string { +func (d *DiskIO) Description() string { return "Read metrics about disk IO by device" } @@ -62,61 +62,60 @@ var diskIOsampleConfig = ` # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] ` -func (_ *DiskIO) SampleConfig() string { +func (d *DiskIO) SampleConfig() string { return diskIOsampleConfig } // hasMeta reports whether s contains any special glob characters. func hasMeta(s string) bool { - return strings.IndexAny(s, "*?[") >= 0 + return strings.ContainsAny(s, "*?[") } -func (s *DiskIO) init() error { - for _, device := range s.Devices { +func (d *DiskIO) init() error { + for _, device := range d.Devices { if hasMeta(device) { - filter, err := filter.Compile(s.Devices) + deviceFilter, err := filter.Compile(d.Devices) if err != nil { return fmt.Errorf("error compiling device pattern: %s", err.Error()) } - s.deviceFilter = filter + d.deviceFilter = deviceFilter } } - s.initialized = true + d.initialized = true return nil } -func (s *DiskIO) Gather(acc telegraf.Accumulator) error { - if !s.initialized { - err := s.init() +func (d *DiskIO) Gather(acc telegraf.Accumulator) error { + if !d.initialized { + err := d.init() if err != nil { return err } } devices := []string{} - if s.deviceFilter == nil { - devices = s.Devices + if d.deviceFilter == nil { + devices = d.Devices } - diskio, err := s.ps.DiskIO(devices) + diskio, err := d.ps.DiskIO(devices) if err != nil { return fmt.Errorf("error getting disk io info: %s", err.Error()) } for _, io := range diskio { - match := false - if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) { + if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) { match = true } tags := map[string]string{} var devLinks []string - tags["name"], devLinks = s.diskName(io.Name) + tags["name"], devLinks = d.diskName(io.Name) - if s.deviceFilter != nil && !match { + if d.deviceFilter != nil && !match { for _, devLink := range devLinks { - if s.deviceFilter.Match(devLink) { + if d.deviceFilter.Match(devLink) { match = true break } @@ -126,11 +125,11 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { } } - for t, v := range s.diskTags(io.Name) { + for t, v := range d.diskTags(io.Name) { tags[t] = v } - if 
!s.SkipSerialNumber { + if !d.SkipSerialNumber { if len(io.SerialNumber) != 0 { tags["serial"] = io.SerialNumber } else { @@ -157,23 +156,23 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *DiskIO) diskName(devName string) (string, []string) { - di, err := s.diskInfo(devName) +func (d *DiskIO) diskName(devName string) (string, []string) { + di, err := d.diskInfo(devName) devLinks := strings.Split(di["DEVLINKS"], " ") for i, devLink := range devLinks { devLinks[i] = strings.TrimPrefix(devLink, "/dev/") } - if len(s.NameTemplates) == 0 { + if len(d.NameTemplates) == 0 { return devName, devLinks } if err != nil { - s.Log.Warnf("Error gathering disk info: %s", err) + d.Log.Warnf("Error gathering disk info: %s", err) return devName, devLinks } - for _, nt := range s.NameTemplates { + for _, nt := range d.NameTemplates { miss := false name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string { sub = sub[1:] // strip leading '$' @@ -195,19 +194,19 @@ func (s *DiskIO) diskName(devName string) (string, []string) { return devName, devLinks } -func (s *DiskIO) diskTags(devName string) map[string]string { - if len(s.DeviceTags) == 0 { +func (d *DiskIO) diskTags(devName string) map[string]string { + if len(d.DeviceTags) == 0 { return nil } - di, err := s.diskInfo(devName) + di, err := d.diskInfo(devName) if err != nil { - s.Log.Warnf("Error gathering disk info: %s", err) + d.Log.Warnf("Error gathering disk info: %s", err) return nil } tags := map[string]string{} - for _, dt := range s.DeviceTags { + for _, dt := range d.DeviceTags { if v, ok := di[dt]; ok { tags[dt] = v } diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index f2499ca17c1c2..c356d49cb7b68 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -16,9 +16,7 @@ type diskInfoCache struct { values map[string]string } -var udevPath = "/run/udev/data" - -func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { +func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { var err error var stat unix.Stat_t @@ -28,33 +26,51 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { return nil, err } - if s.infoCache == nil { - s.infoCache = map[string]diskInfoCache{} + if d.infoCache == nil { + d.infoCache = map[string]diskInfoCache{} } - ic, ok := s.infoCache[devName] + ic, ok := d.infoCache[devName] if ok && stat.Mtim.Nano() == ic.modifiedAt { return ic.values, nil } - major := unix.Major(uint64(stat.Rdev)) - minor := unix.Minor(uint64(stat.Rdev)) - udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) - - di := map[string]string{} + var udevDataPath string + if ok && len(ic.udevDataPath) > 0 { + // We can reuse the udev data path from a "previous" entry. 
+ // This allows us to also "poison" it during test scenarios + udevDataPath = ic.udevDataPath + } else { + major := unix.Major(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures + minor := unix.Minor(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures + udevDataPath = fmt.Sprintf("/run/udev/data/b%d:%d", major, minor) - s.infoCache[devName] = diskInfoCache{ - modifiedAt: stat.Mtim.Nano(), - udevDataPath: udevDataPath, - values: di, + _, err := os.Stat(udevDataPath) + if err != nil { + // This path failed, try the fallback .udev style (non-systemd) + udevDataPath = fmt.Sprintf("/dev/.udev/db/block:%s", devName) + _, err := os.Stat(udevDataPath) + if err != nil { + // Giving up, cannot retrieve disk info + return nil, err + } + } } - + // Final open of the confirmed (or the previously detected/used) udev file f, err := os.Open(udevDataPath) if err != nil { return nil, err } defer f.Close() + di := map[string]string{} + + d.infoCache[devName] = diskInfoCache{ + modifiedAt: stat.Mtim.Nano(), + udevDataPath: udevDataPath, + values: di, + } + scnr := bufio.NewScanner(f) var devlinks bytes.Buffer for scnr.Scan() { @@ -64,9 +80,12 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { } if l[:2] == "S:" { if devlinks.Len() > 0 { + //nolint:errcheck,revive // this will never fail devlinks.WriteString(" ") } + //nolint:errcheck,revive // this will never fail devlinks.WriteString("/dev/") + //nolint:errcheck,revive // this will never fail devlinks.WriteString(l[2:]) continue } diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 1cb031bdce553..8a76e230cbb98 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -1,13 +1,12 @@ +//go:build linux // +build linux package diskio import ( - "io/ioutil" "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -19,19 +18,32 @@ S:foo/bar/devlink1 `) // setupNullDisk sets up fake udev info as if /dev/null were a disk. 
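The diskInfo hunk above now resolves the udev database file in two steps: it first tries the systemd layout keyed by the device's major:minor pair, then falls back to the legacy non-systemd location keyed by device name, and only then gives up. A minimal standalone sketch of that lookup order, assuming a hypothetical helper name resolveUdevPath:

```go
package main

import (
	"fmt"
	"os"
)

// resolveUdevPath is a hypothetical helper mirroring the lookup order in the
// diff above: prefer the systemd udev database, fall back to the legacy one.
func resolveUdevPath(major, minor uint32, devName string) (string, error) {
	// Systemd-style path, keyed by the block device's major:minor numbers.
	candidate := fmt.Sprintf("/run/udev/data/b%d:%d", major, minor)
	if _, err := os.Stat(candidate); err == nil {
		return candidate, nil
	}
	// Legacy (non-systemd) udev database, keyed by the device name.
	candidate = fmt.Sprintf("/dev/.udev/db/block:%s", devName)
	if _, err := os.Stat(candidate); err != nil {
		// Neither location exists; disk info cannot be retrieved.
		return "", err
	}
	return candidate, nil
}

func main() {
	path, err := resolveUdevPath(8, 0, "sda")
	if err != nil {
		fmt.Println("no udev data:", err)
		return
	}
	fmt.Println("udev data at:", path)
}
```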
-func setupNullDisk(t *testing.T) func() error { - td, err := ioutil.TempDir("", ".telegraf.TestDiskInfo") +func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() { + td, err := os.CreateTemp("", ".telegraf.DiskInfoTest") require.NoError(t, err) - origUdevPath := udevPath + if s.infoCache == nil { + s.infoCache = make(map[string]diskInfoCache) + } + ic, ok := s.infoCache[devName] + if !ok { + // No previous calls for the device were done, easy to poison the cache + s.infoCache[devName] = diskInfoCache{ + modifiedAt: 0, + udevDataPath: td.Name(), + values: map[string]string{}, + } + } + origUdevPath := ic.udevDataPath - cleanFunc := func() error { - udevPath = origUdevPath - return os.RemoveAll(td) + cleanFunc := func() { + ic.udevDataPath = origUdevPath + //nolint:errcheck,revive // we cannot do anything if file cannot be removed + os.Remove(td.Name()) } - udevPath = td - err = ioutil.WriteFile(td+"/b1:3", nullDiskInfo, 0644) // 1:3 is the 'null' device + ic.udevDataPath = td.Name() + _, err = td.Write(nullDiskInfo) if err != nil { cleanFunc() t.Fatal(err) @@ -41,34 +53,29 @@ func setupNullDisk(t *testing.T) func() error { } func TestDiskInfo(t *testing.T) { - clean := setupNullDisk(t) - defer clean() - s := &DiskIO{} + clean := setupNullDisk(t, s, "null") + defer clean() di, err := s.diskInfo("null") require.NoError(t, err) - assert.Equal(t, "myval1", di["MY_PARAM_1"]) - assert.Equal(t, "myval2", di["MY_PARAM_2"]) - assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) + require.Equal(t, "myval1", di["MY_PARAM_1"]) + require.Equal(t, "myval2", di["MY_PARAM_2"]) + require.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // test that data is cached - err = clean() - require.NoError(t, err) + clean() di, err = s.diskInfo("null") require.NoError(t, err) - assert.Equal(t, "myval1", di["MY_PARAM_1"]) - assert.Equal(t, "myval2", di["MY_PARAM_2"]) - assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) - + require.Equal(t, "myval1", di["MY_PARAM_1"]) + require.Equal(t, "myval2", di["MY_PARAM_2"]) + require.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // unfortunately we can't adjust mtime on /dev/null to test cache invalidation } // DiskIOStats.diskName isn't a linux specific function, but dependent // functions are a no-op on non-Linux. func TestDiskIOStats_diskName(t *testing.T) { - defer setupNullDisk(t)() - tests := []struct { templates []string expected string @@ -85,22 +92,24 @@ func TestDiskIOStats_diskName(t *testing.T) { } for _, tc := range tests { - s := DiskIO{ - NameTemplates: tc.templates, - } - name, _ := s.diskName("null") - assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + func() { + s := DiskIO{ + NameTemplates: tc.templates, + } + defer setupNullDisk(t, &s, "null")() //nolint:revive // done on purpose, cleaning will be executed properly + name, _ := s.diskName("null") + require.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + }() } } // DiskIOStats.diskTags isn't a linux specific function, but dependent // functions are a no-op on non-Linux. 
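The reworked TestDiskIOStats_diskName above wraps each table case in an immediately invoked function, so the cleanup returned by setupNullDisk runs via defer at the end of every iteration rather than piling up until the whole test exits. A generic sketch of that per-iteration defer pattern; the setup helper and case names here are purely illustrative:

```go
package main

import "fmt"

// setup stands in for a helper like setupNullDisk: it prepares per-case
// state and returns the matching cleanup function.
func setup(name string) func() {
	fmt.Println("setup:", name)
	return func() { fmt.Println("cleanup:", name) }
}

func main() {
	for _, tc := range []string{"case-a", "case-b"} {
		// The anonymous function gives defer a per-iteration scope, so each
		// cleanup fires right after its own case instead of at process exit.
		func() {
			defer setup(tc)()
			fmt.Println("run:", tc)
		}()
	}
}
```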
func TestDiskIOStats_diskTags(t *testing.T) { - defer setupNullDisk(t)() - s := &DiskIO{ DeviceTags: []string{"MY_PARAM_2"}, } + defer setupNullDisk(t, s, "null")() //nolint:revive // done on purpose, cleaning will be executed properly dt := s.diskTags("null") - assert.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) + require.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) } diff --git a/plugins/inputs/diskio/diskio_other.go b/plugins/inputs/diskio/diskio_other.go index 07fb8c3b87faa..458a64c13f7bb 100644 --- a/plugins/inputs/diskio/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -1,9 +1,10 @@ +//go:build !linux // +build !linux package diskio type diskInfoCache struct{} -func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { +func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { return nil, nil } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 6585ab88eb587..6fa63ec8bd874 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -18,8 +18,7 @@ import ( type Disque struct { Servers []string - c net.Conn - buf []byte + c net.Conn } var sampleConfig = ` @@ -32,11 +31,11 @@ var sampleConfig = ` var defaultTimeout = 5 * time.Second -func (r *Disque) SampleConfig() string { +func (d *Disque) SampleConfig() string { return sampleConfig } -func (r *Disque) Description() string { +func (d *Disque) Description() string { return "Read metrics from one or many disque servers" } @@ -64,21 +63,20 @@ var ErrProtocolError = errors.New("disque protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *Disque) Gather(acc telegraf.Accumulator) error { - if len(g.Servers) == 0 { - url := &url.URL{ +func (d *Disque) Gather(acc telegraf.Accumulator) error { + if len(d.Servers) == 0 { + address := &url.URL{ Host: ":7711", } - g.gatherServer(url, acc) - return nil + return d.gatherServer(address, acc) } var wg sync.WaitGroup - for _, serv := range g.Servers { + for _, serv := range d.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e. 
"10.0.0.1:10000") @@ -87,10 +85,10 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error { u.Path = "" } wg.Add(1) - go func(serv string) { + go func() { defer wg.Done() - acc.AddError(g.gatherServer(u, acc)) - }(serv) + acc.AddError(d.gatherServer(u, acc)) + }() } wg.Wait() @@ -100,9 +98,8 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error { const defaultPort = "7711" -func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { - if g.c == nil { - +func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { + if d.c == nil { _, _, err := net.SplitHostPort(addr.Host) if err != nil { addr.Host = addr.Host + ":" + defaultPort @@ -110,13 +107,15 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout) if err != nil { - return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err) + return fmt.Errorf("unable to connect to disque server '%s': %s", addr.Host, err) } if addr.User != nil { pwd, set := addr.User.Password() if set && pwd != "" { - c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))) + if _, err := c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))); err != nil { + return err + } r := bufio.NewReader(c) @@ -130,15 +129,19 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { } } - g.c = c + d.c = c } // Extend connection - g.c.SetDeadline(time.Now().Add(defaultTimeout)) + if err := d.c.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } - g.c.Write([]byte("info\r\n")) + if _, err := d.c.Write([]byte("info\r\n")); err != nil { + return err + } - r := bufio.NewReader(g.c) + r := bufio.NewReader(d.c) line, err := r.ReadString('\n') if err != nil { @@ -176,7 +179,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { parts := strings.SplitN(line, ":", 2) - name := string(parts[0]) + name := parts[0] metric, ok := Tracking[name] if !ok { diff --git a/plugins/inputs/disque/disque_test.go b/plugins/inputs/disque/disque_test.go index 1e5b764f9c820..4eacbd76c6a1e 100644 --- a/plugins/inputs/disque/disque_test.go +++ b/plugins/inputs/disque/disque_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestDisqueGeneratesMetrics(t *testing.T) { +func TestDisqueGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -38,8 +38,12 @@ func TestDisqueGeneratesMetrics(t *testing.T) { return } - fmt.Fprintf(c, "$%d\n", len(testOutput)) - c.Write([]byte(testOutput)) + if _, err := fmt.Fprintf(c, "$%d\n", len(testOutput)); err != nil { + return + } + if _, err := c.Write([]byte(testOutput)); err != nil { + return + } } }() @@ -76,7 +80,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) { acc.AssertContainsFields(t, "disque", fields) } -func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { +func TestDisqueCanPullStatsFromMultipleServersIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -104,8 +108,12 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { return } - fmt.Fprintf(c, "$%d\n", len(testOutput)) - c.Write([]byte(testOutput)) + if _, err := fmt.Fprintf(c, "$%d\n", len(testOutput)); err != nil { + return + } + if _, err := c.Write([]byte(testOutput)); err != nil { + return + } } }() diff --git a/plugins/inputs/dmcache/dmcache_linux.go b/plugins/inputs/dmcache/dmcache_linux.go index 8e8d7de918560..712e67900ba4d 
100644 --- a/plugins/inputs/dmcache/dmcache_linux.go +++ b/plugins/inputs/dmcache/dmcache_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_linux_test.go b/plugins/inputs/dmcache/dmcache_linux_test.go index 30e32b1e876a4..93cd1e85e79bb 100644 --- a/plugins/inputs/dmcache/dmcache_linux_test.go +++ b/plugins/inputs/dmcache/dmcache_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go b/plugins/inputs/dmcache/dmcache_notlinux.go index ee1065638cab7..96aa0c65712ff 100644 --- a/plugins/inputs/dmcache/dmcache_notlinux.go +++ b/plugins/inputs/dmcache/dmcache_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package dmcache diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index c5657277073c2..a3b2f262ba7e0 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -16,12 +16,12 @@ import ( type ResultType uint64 const ( - Success ResultType = 0 - Timeout = 1 - Error = 2 + Success ResultType = iota + Timeout + Error ) -type DnsQuery struct { +type DNSQuery struct { // Domains or subdomains to query Domains []string @@ -62,14 +62,14 @@ var sampleConfig = ` # timeout = 2 ` -func (d *DnsQuery) SampleConfig() string { +func (d *DNSQuery) SampleConfig() string { return sampleConfig } -func (d *DnsQuery) Description() string { +func (d *DNSQuery) Description() string { return "Query given DNS server and gives statistics" } -func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { +func (d *DNSQuery) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup d.setDefaultValues() @@ -84,7 +84,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { "record_type": d.RecordType, } - dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server) + dnsQueryTime, rcode, err := d.getDNSQueryTime(domain, server) if rcode >= 0 { tags["rcode"] = dns.RcodeToString[rcode] fields["rcode_value"] = rcode @@ -110,7 +110,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { return nil } -func (d *DnsQuery) setDefaultValues() { +func (d *DNSQuery) setDefaultValues() { if d.Network == "" { d.Network = "udp" } @@ -133,7 +133,7 @@ func (d *DnsQuery) setDefaultValues() { } } -func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) { +func (d *DNSQuery) getDNSQueryTime(domain string, server string) (float64, int, error) { dnsQueryTime := float64(0) c := new(dns.Client) @@ -159,7 +159,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, return dnsQueryTime, r.Rcode, nil } -func (d *DnsQuery) parseRecordType() (uint16, error) { +func (d *DNSQuery) parseRecordType() (uint16, error) { var recordType uint16 var err error @@ -210,6 +210,6 @@ func setResult(result ResultType, fields map[string]interface{}, tags map[string func init() { inputs.Add("dns_query", func() telegraf.Input { - return &DnsQuery{} + return &DNSQuery{} }) } diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 5a1379764cff0..c1dd7abf06121 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -18,7 +18,7 @@ func TestGathering(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -37,7 +37,7 @@ 
func TestGatheringMxRecord(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -57,7 +57,7 @@ func TestGatheringRootDomain(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: []string{"."}, RecordType: "MX", @@ -67,8 +67,13 @@ func TestGatheringRootDomain(t *testing.T) { "server": "8.8.8.8", "domain": ".", "record_type": "MX", + "rcode": "NOERROR", + "result": "success", + } + fields := map[string]interface{}{ + "rcode_value": int(0), + "result_code": uint64(0), } - fields := map[string]interface{}{} err := acc.GatherError(dnsConfig.Gather) assert.NoError(t, err) @@ -84,7 +89,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -93,8 +98,13 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { "server": "8.8.8.8", "domain": "google.com", "record_type": "NS", + "rcode": "NOERROR", + "result": "success", + } + fields := map[string]interface{}{ + "rcode_value": int(0), + "result_code": uint64(0), } - fields := map[string]interface{}{} err := acc.GatherError(dnsConfig.Gather) assert.NoError(t, err) @@ -110,7 +120,7 @@ func TestGatheringTimeout(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -131,7 +141,7 @@ func TestGatheringTimeout(t *testing.T) { } func TestSettingDefaultValues(t *testing.T) { - dnsConfig := DnsQuery{} + dnsConfig := DNSQuery{} dnsConfig.setDefaultValues() @@ -140,7 +150,7 @@ func TestSettingDefaultValues(t *testing.T) { assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") - dnsConfig = DnsQuery{Domains: []string{"."}} + dnsConfig = DNSQuery{Domains: []string{"."}} dnsConfig.setDefaultValues() @@ -148,7 +158,7 @@ func TestSettingDefaultValues(t *testing.T) { } func TestRecordTypeParser(t *testing.T) { - var dnsConfig = DnsQuery{} + var dnsConfig = DNSQuery{} var recordType uint16 dnsConfig.RecordType = "A" @@ -197,7 +207,7 @@ func TestRecordTypeParser(t *testing.T) { } func TestRecordTypeParserError(t *testing.T) { - var dnsConfig = DnsQuery{} + var dnsConfig = DNSQuery{} var err error dnsConfig.RecordType = "nil" diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 95394c94e9c44..8d75e641a1fb4 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -43,12 +43,29 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) ## Timeout for docker list, info, and stats commands timeout = "5s" - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not + ## Whether to report for each container per-device blkio (8:0, 8:1...), + ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. 
+ ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## is honored. perdevice = true - - ## Whether to report for each container total blkio and network stats or not + + ## Specifies for which classes a per-device metric should be issued + ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) + ## Please note that this setting has no effect if 'perdevice' is set to 'true' + # perdevice_include = ["cpu"] + + ## Whether to report for each container total blkio and network stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## is honored. total = false + + ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values. + ## Possible values are 'cpu', 'blkio' and 'network' + ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. + ## Please note that this setting has no effect if 'total' is set to 'false' + # total_include = ["cpu", "blkio", "network"] ## docker labels to include and exclude as tags. Globs accepted. ## Note that an empty array for both will include all labels as tags @@ -115,6 +132,22 @@ may prefer to exclude them: docker_label_exclude = ["annotation.kubernetes*"] ``` + +#### Docker-compose Labels + +Docker-compose will add labels to your containers. You can restrict labels to selected ones, e.g. + +``` + docker_label_include = [ + "com.docker.compose.config-hash", + "com.docker.compose.container-number", + "com.docker.compose.oneoff", + "com.docker.compose.project", + "com.docker.compose.service", + ] +``` + + ### Metrics: - docker diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index 3ea24ea742530..6abba44c549d6 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - docker "github.com/docker/docker/client" + dockerClient "github.com/docker/docker/client" ) var ( @@ -23,10 +23,11 @@ type Client interface { ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + Close() error } func NewEnvClient() (Client, error) { - client, err := docker.NewClientWithOpts(docker.FromEnv) + client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv) if err != nil { return nil, err } @@ -39,11 +40,11 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } httpClient := &http.Client{Transport: transport} - client, err := docker.NewClientWithOpts( - docker.WithHTTPHeaders(defaultHeaders), - docker.WithHTTPClient(httpClient), - docker.WithVersion(version), - docker.WithHost(host)) + client, err := dockerClient.NewClientWithOpts( + dockerClient.WithHTTPHeaders(defaultHeaders), + dockerClient.WithHTTPClient(httpClient), + dockerClient.WithVersion(version), + dockerClient.WithHost(host)) if err != nil { return nil, err } @@ -52,7 +53,7 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } type SocketClient struct { - client *docker.Client + client *dockerClient.Client } func (c 
*SocketClient) Info(ctx context.Context) (types.Info, error) { @@ -76,3 +77,6 @@ func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptio func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { return c.client.NodeList(ctx, options) } +func (c *SocketClient) Close() error { + return c.client.Close() +} diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index dafedacafb3f1..5320e77b27ce8 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "net/http" "regexp" "strconv" "strings" @@ -16,10 +15,12 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/docker" + "github.com/influxdata/telegraf/internal/choice" + dockerint "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -31,12 +32,14 @@ type Docker struct { GatherServices bool `toml:"gather_services"` - Timeout internal.Duration - PerDevice bool `toml:"perdevice"` - Total bool `toml:"total"` - TagEnvironment []string `toml:"tag_env"` - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` + Timeout config.Duration + PerDevice bool `toml:"perdevice"` + PerDeviceInclude []string `toml:"perdevice_include"` + Total bool `toml:"total"` + TotalInclude []string `toml:"total_include"` + TagEnvironment []string `toml:"tag_env"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` ContainerInclude []string `toml:"container_name_include"` ContainerExclude []string `toml:"container_name_exclude"` @@ -54,7 +57,6 @@ type Docker struct { newClient func(string, *tls.Config) (Client, error) client Client - httpClient *http.Client engineHost string serverVersion string filtersCreated bool @@ -72,12 +74,21 @@ const ( PB = 1000 * TB defaultEndpoint = "unix:///var/run/docker.sock" + + perDeviceIncludeDeprecationWarning = "'perdevice' setting is set to 'true' so 'blkio' and 'network' metrics will " + + "be collected. Please set it to 'false' and use 'perdevice_include' instead to control this behaviour as " + + "'perdevice' will be deprecated" + + totalIncludeDeprecationWarning = "'total' setting is set to 'false' so 'blkio' and 'network' metrics will not be " + + "collected. Please set it to 'true' and use 'total_include' instead to control this behaviour as 'total' " + + "will be deprecated" ) var ( - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) - containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} - now = time.Now + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) + containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} + containerMetricClasses = []string{"cpu", "network", "blkio"} + now = time.Now ) var sampleConfig = ` @@ -110,13 +121,30 @@ var sampleConfig = ` ## Timeout for docker list, info, and stats commands timeout = "5s" - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) 
stats or not + ## Whether to report for each container per-device blkio (8:0, 8:1...), + ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## is honored. perdevice = true - ## Whether to report for each container total blkio and network stats or not + ## Specifies for which classes a per-device metric should be issued + ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) + ## Please note that this setting has no effect if 'perdevice' is set to 'true' + # perdevice_include = ["cpu"] + + ## Whether to report for each container total blkio and network stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## is honored. total = false + ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values. + ## Possible values are 'cpu', 'blkio' and 'network' + ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. + ## Please note that this setting has no effect if 'total' is set to 'false' + # total_include = ["cpu", "blkio", "network"] + ## Which environment variables should we use as a tag ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] @@ -141,6 +169,41 @@ func (d *Docker) Description() string { return "Read metrics about docker containers" } +func (d *Docker) Init() error { + err := choice.CheckSlice(d.PerDeviceInclude, containerMetricClasses) + if err != nil { + return fmt.Errorf("error validating 'perdevice_include' setting: %v", err) + } + + err = choice.CheckSlice(d.TotalInclude, containerMetricClasses) + if err != nil { + return fmt.Errorf("error validating 'total_include' setting: %v", err) + } + + // Temporary logic needed for backwards compatibility until 'perdevice' setting is removed. + if d.PerDevice { + d.Log.Warn(perDeviceIncludeDeprecationWarning) + if !choice.Contains("network", d.PerDeviceInclude) { + d.PerDeviceInclude = append(d.PerDeviceInclude, "network") + } + if !choice.Contains("blkio", d.PerDeviceInclude) { + d.PerDeviceInclude = append(d.PerDeviceInclude, "blkio") + } + } + + // Temporary logic needed for backwards compatibility until 'total' setting is removed. + if !d.Total { + d.Log.Warn(totalIncludeDeprecationWarning) + if choice.Contains("cpu", d.TotalInclude) { + d.TotalInclude = []string{"cpu"} + } else { + d.TotalInclude = []string{} + } + } + + return nil +} + // Gather metrics from the docker server. 
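Init above validates the new list-valued settings against the known metric classes and then rewrites them while the deprecated perdevice/total booleans are still honored. A condensed, self-contained sketch of that validate-then-migrate flow, with local stand-ins for choice.CheckSlice and choice.Contains:

```go
package main

import "fmt"

var classes = []string{"cpu", "network", "blkio"}

// contains mimics choice.Contains from the diff above.
func contains(s string, list []string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

// checkSlice mimics choice.CheckSlice: every entry must be a known class.
func checkSlice(values, allowed []string) error {
	for _, v := range values {
		if !contains(v, allowed) {
			return fmt.Errorf("unknown choice %q", v)
		}
	}
	return nil
}

func main() {
	perDevice, total := true, false // deprecated booleans, as old configs set them
	perDeviceInclude := []string{"cpu"}
	totalInclude := []string{"cpu", "blkio", "network"}

	if err := checkSlice(perDeviceInclude, classes); err != nil {
		panic(err)
	}
	if err := checkSlice(totalInclude, classes); err != nil {
		panic(err)
	}
	// perdevice=true historically meant per-device network and blkio metrics,
	// so keep emitting them until the old flag is finally removed.
	if perDevice {
		for _, c := range []string{"network", "blkio"} {
			if !contains(c, perDeviceInclude) {
				perDeviceInclude = append(perDeviceInclude, c)
			}
		}
	}
	// total=false historically suppressed every total except the CPU one.
	if !total {
		if contains("cpu", totalInclude) {
			totalInclude = []string{"cpu"}
		} else {
			totalInclude = []string{}
		}
	}
	fmt.Println(perDeviceInclude, totalInclude) // [cpu network blkio] [cpu]
}
```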
func (d *Docker) Gather(acc telegraf.Accumulator) error { if d.client == nil { @@ -151,6 +214,9 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { d.client = c } + // Close any idle connections in the end of gathering + defer d.client.Close() + // Create label filters if not already created if !d.filtersCreated { err := d.createLabelFilters() @@ -197,7 +263,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { opts := types.ContainerListOptions{ Filters: filterArgs, } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() containers, err := d.client.ContainerList(ctx, opts) @@ -225,7 +291,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) @@ -302,7 +368,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { now := time.Now() // Get info from docker daemon - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() info, err := d.client.Info(ctx) @@ -434,8 +500,7 @@ func (d *Docker) gatherContainer( var cname string for _, name := range container.Names { trimmedName := strings.TrimPrefix(name, "/") - match := d.containerFilter.Match(trimmedName) - if match { + if !strings.Contains(trimmedName, "/") { cname = trimmedName break } @@ -445,7 +510,11 @@ func (d *Docker) gatherContainer( return nil } - imageName, imageVersion := docker.ParseImage(container.Image) + if !d.containerFilter.Match(cname) { + return nil + } + + imageName, imageVersion := dockerint.ParseImage(container.Image) tags := map[string]string{ "engine_host": d.engineHost, @@ -459,7 +528,7 @@ func (d *Docker) gatherContainer( tags["source"] = hostnameFromID(container.ID) } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() r, err := d.client.ContainerStats(ctx, container.ID, false) @@ -480,11 +549,6 @@ func (d *Docker) gatherContainer( } daemonOSType := r.OSType - // use common (printed at `docker ps`) name for container - if v.Name != "" { - tags["container_name"] = strings.TrimPrefix(v.Name, "/") - } - // Add labels to tags for k, label := range container.Labels { if d.labelFilter.Match(k) { @@ -502,7 +566,7 @@ func (d *Docker) gatherContainerInspect( daemonOSType string, v *types.StatsJSON, ) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() info, err := d.client.ContainerInspect(ctx, container.ID) @@ -518,7 +582,7 @@ func (d *Docker) gatherContainerInspect( for _, envvar := range info.Config.Env { for _, configvar := range d.TagEnvironment { dockEnv := strings.SplitN(envvar, "=", 2) - //check for presence of tag in whitelist + // check for presence of tag in whitelist if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] { tags[dockEnv[0]] = dockEnv[1] } @@ -565,18 +629,16 @@ func (d *Docker) gatherContainerInspect( } } - 
parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) + d.parseContainerStats(v, acc, tags, container.ID, daemonOSType) return nil } -func parseContainerStats( +func (d *Docker) parseContainerStats( stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, id string, - perDevice bool, - total bool, daemonOSType string, ) { tm := stat.Read @@ -645,48 +707,52 @@ func parseContainerStats( acc.AddFields("docker_container_mem", memfields, tags, tm) - cpufields := map[string]interface{}{ - "usage_total": stat.CPUStats.CPUUsage.TotalUsage, - "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, - "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, - "usage_system": stat.CPUStats.SystemUsage, - "throttling_periods": stat.CPUStats.ThrottlingData.Periods, - "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, - "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, - "container_id": id, - } + if choice.Contains("cpu", d.TotalInclude) { + cpufields := map[string]interface{}{ + "usage_total": stat.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stat.CPUStats.SystemUsage, + "throttling_periods": stat.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, + "container_id": id, + } + + if daemonOSType != "windows" { + previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage + previousSystem := stat.PreCPUStats.SystemUsage + cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) + cpufields["usage_percent"] = cpuPercent + } else { + cpuPercent := calculateCPUPercentWindows(stat) + cpufields["usage_percent"] = cpuPercent + } - if daemonOSType != "windows" { - previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage - previousSystem := stat.PreCPUStats.SystemUsage - cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) - cpufields["usage_percent"] = cpuPercent - } else { - cpuPercent := calculateCPUPercentWindows(stat) - cpufields["usage_percent"] = cpuPercent + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("docker_container_cpu", cpufields, cputags, tm) } - cputags := copyTags(tags) - cputags["cpu"] = "cpu-total" - acc.AddFields("docker_container_cpu", cpufields, cputags, tm) - - // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs - // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) - var percpuusage []uint64 - if stat.CPUStats.OnlineCPUs > 0 { - percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs] - } else { - percpuusage = stat.CPUStats.CPUUsage.PercpuUsage - } + if choice.Contains("cpu", d.PerDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 { + // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs + // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) + var percpuusage []uint64 + if stat.CPUStats.OnlineCPUs > 0 { + percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs] + } else { + percpuusage = stat.CPUStats.CPUUsage.PercpuUsage + } - for i, percpu := range percpuusage { - percputags := copyTags(tags) - percputags["cpu"] = fmt.Sprintf("cpu%d", i) - fields := map[string]interface{}{ - 
"usage_total": percpu, - "container_id": id, + for i, percpu := range percpuusage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + fields := map[string]interface{}{ + "usage_total": percpu, + "container_id": id, + } + acc.AddFields("docker_container_cpu", fields, percputags, tm) } - acc.AddFields("docker_container_cpu", fields, percputags, tm) } totalNetworkStatMap := make(map[string]interface{}) @@ -703,12 +769,12 @@ func parseContainerStats( "container_id": id, } // Create a new network tag dictionary for the "network" tag - if perDevice { + if choice.Contains("network", d.PerDeviceInclude) { nettags := copyTags(tags) nettags["network"] = network acc.AddFields("docker_container_net", netfields, nettags, tm) } - if total { + if choice.Contains("network", d.TotalInclude) { for field, value := range netfields { if field == "container_id" { continue @@ -735,27 +801,18 @@ func parseContainerStats( } // totalNetworkStatMap could be empty if container is running with --net=host. - if total && len(totalNetworkStatMap) != 0 { + if choice.Contains("network", d.TotalInclude) && len(totalNetworkStatMap) != 0 { nettags := copyTags(tags) nettags["network"] = "total" totalNetworkStatMap["container_id"] = id acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm) } - gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total) + d.gatherBlockIOMetrics(acc, stat, tags, tm, id) } -func gatherBlockIOMetrics( - stat *types.StatsJSON, - acc telegraf.Accumulator, - tags map[string]string, - tm time.Time, - id string, - perDevice bool, - total bool, -) { - blkioStats := stat.BlkioStats - // Make a map of devices to their block io stats +// Make a map of devices to their block io stats +func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interface{} { deviceStatMap := make(map[string]map[string]interface{}) for _, metric := range blkioStats.IoServiceBytesRecursive { @@ -813,16 +870,30 @@ func gatherBlockIOMetrics( device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) deviceStatMap[device]["sectors_recursive"] = metric.Value } + return deviceStatMap +} + +func (d *Docker) gatherBlockIOMetrics( + acc telegraf.Accumulator, + stat *types.StatsJSON, + tags map[string]string, + tm time.Time, + id string, +) { + perDeviceBlkio := choice.Contains("blkio", d.PerDeviceInclude) + totalBlkio := choice.Contains("blkio", d.TotalInclude) + blkioStats := stat.BlkioStats + deviceStatMap := getDeviceStatMap(blkioStats) totalStatMap := make(map[string]interface{}) for device, fields := range deviceStatMap { fields["container_id"] = id - if perDevice { + if perDeviceBlkio { iotags := copyTags(tags) iotags["device"] = device acc.AddFields("docker_container_blkio", fields, iotags, tm) } - if total { + if totalBlkio { for field, value := range fields { if field == "container_id" { continue @@ -847,7 +918,7 @@ func gatherBlockIOMetrics( } } } - if total { + if totalBlkio { totalStatMap["container_id"] = id iotags := copyTags(tags) iotags["device"] = "total" @@ -863,15 +934,6 @@ func copyTags(in map[string]string) map[string]string { return out } -func sliceContains(in string, sl []string) bool { - for _, str := range sl { - if str == in { - return true - } - } - return false -} - // Parses the human-readable size string into the amount it represents. 
func parseSize(sizeStr string) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) @@ -899,20 +961,20 @@ func (d *Docker) createContainerFilters() error { d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...) } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + containerFilter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) if err != nil { return err } - d.containerFilter = filter + d.containerFilter = containerFilter return nil } func (d *Docker) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) if err != nil { return err } - d.labelFilter = filter + d.labelFilter = labelFilter return nil } @@ -920,11 +982,11 @@ func (d *Docker) createContainerStateFilters() error { if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { d.ContainerStateInclude = []string{"running"} } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + stateFilter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) if err != nil { return err } - d.stateFilter = filter + d.stateFilter = stateFilter return nil } @@ -944,12 +1006,14 @@ func (d *Docker) getNewClient() (Client, error) { func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ - PerDevice: true, - Timeout: internal.Duration{Duration: time.Second * 5}, - Endpoint: defaultEndpoint, - newEnvClient: NewEnvClient, - newClient: NewClient, - filtersCreated: false, + PerDevice: true, + PerDeviceInclude: []string{"cpu"}, + TotalInclude: []string{"cpu", "blkio", "network"}, + Timeout: config.Duration(time.Second * 5), + Endpoint: defaultEndpoint, + newEnvClient: NewEnvClient, + newClient: NewClient, + filtersCreated: false, } }) } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index d8700217c307d..a84a6047b30aa 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,7 +3,8 @@ package docker import ( "context" "crypto/tls" - "io/ioutil" + "io" + "reflect" "sort" "strings" "testing" @@ -11,9 +12,11 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type MockClient struct { @@ -24,6 +27,7 @@ type MockClient struct { ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + CloseF func() error } func (c *MockClient) Info(ctx context.Context) (types.Info, error) { @@ -73,6 +77,10 @@ func (c *MockClient) NodeList( return c.NodeListF(ctx, options) } +func (c *MockClient) Close() error { + return c.CloseF() +} + var baseClient = MockClient{ InfoF: func(context.Context) (types.Info, error) { return info, nil @@ -95,9 +103,12 @@ var baseClient = MockClient{ NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { return NodeList, nil }, + CloseF: func() error { + return nil + }, } -func newClient(host string, tlsConfig *tls.Config) (Client, error) { 
+func newClient(_ string, _ *tls.Config) (Client, error) { return &baseClient, nil } @@ -110,7 +121,12 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_image": "redis/image", } - parseContainerStats(stats, &acc, tags, "123456789", true, true, "linux") + d := &Docker{ + Log: testutil.Logger{}, + PerDeviceInclude: containerMetricClasses, + TotalInclude: containerMetricClasses, + } + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") // test docker_container_net measurement netfields := map[string]interface{}{ @@ -277,6 +293,9 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { return NodeList, nil }, + CloseF: func() error { + return nil + }, }, nil }, } @@ -396,6 +415,8 @@ func TestContainerLabels(t *testing.T) { newClient: newClientFunc, LabelInclude: tt.include, LabelExclude: tt.exclude, + Total: true, + TotalInclude: []string{"cpu"}, } err := d.Gather(&acc) @@ -751,6 +772,9 @@ func TestDockerGatherInfo(t *testing.T) { newClient: newClient, TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5", "ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"}, + PerDeviceInclude: []string{"cpu", "network", "blkio"}, + Total: true, + TotalInclude: []string{""}, } err := acc.GatherError(d.Gather) @@ -904,7 +928,7 @@ func TestDockerGatherSwarmInfo(t *testing.T) { err := acc.GatherError(d.Gather) require.NoError(t, err) - d.gatherSwarmInfo(&acc) + require.NoError(t, d.gatherSwarmInfo(&acc)) // test docker_container_net measurement acc.AssertContainsTaggedFields(t, @@ -1036,7 +1060,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)), }, nil } return &client, nil @@ -1056,7 +1080,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{}`)), + Body: io.NopCloser(strings.NewReader(`{}`)), }, nil } return &client, nil @@ -1115,5 +1139,245 @@ func TestHostnameFromID(t *testing.T) { } }) } +} + +func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { + type args struct { + stat *types.StatsJSON + tags map[string]string + id string + perDeviceInclude []string + totalInclude []string + daemonOSType string + } + + var ( + testDate = time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + metricCPUTotal = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu-total", + }, + map[string]interface{}{}, + testDate) + + metricCPU0 = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{}, + testDate) + metricCPU1 = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{}, + testDate) + + metricNetworkTotal = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "total", + }, + map[string]interface{}{}, + testDate) + + metricNetworkEth0 = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "eth0", + }, + map[string]interface{}{}, + testDate) + + metricNetworkEth1 = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": 
"eth0", + }, + map[string]interface{}{}, + testDate) + metricBlkioTotal = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "total", + }, + map[string]interface{}{}, + testDate) + metricBlkio6_0 = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "6:0", + }, + map[string]interface{}{}, + testDate) + metricBlkio6_1 = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "6:1", + }, + map[string]interface{}{}, + testDate) + ) + stats := testStats() + tests := []struct { + name string + args args + expected []telegraf.Metric + }{ + { + name: "Per device and total metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: containerMetricClasses, + totalInclude: containerMetricClasses, + }, + expected: []telegraf.Metric{ + metricCPUTotal, metricCPU0, metricCPU1, + metricNetworkTotal, metricNetworkEth0, metricNetworkEth1, + metricBlkioTotal, metricBlkio6_0, metricBlkio6_1, + }, + }, + { + name: "Per device metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: containerMetricClasses, + totalInclude: []string{}, + }, + expected: []telegraf.Metric{ + metricCPU0, metricCPU1, + metricNetworkEth0, metricNetworkEth1, + metricBlkio6_0, metricBlkio6_1, + }, + }, + { + name: "Total metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: []string{}, + totalInclude: containerMetricClasses, + }, + expected: []telegraf.Metric{metricCPUTotal, metricNetworkTotal, metricBlkioTotal}, + }, + { + name: "Per device and total metrics disabled", + args: args{ + stat: stats, + perDeviceInclude: []string{}, + totalInclude: []string{}, + }, + expected: []telegraf.Metric{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + d := &Docker{ + Log: testutil.Logger{}, + PerDeviceInclude: tt.args.perDeviceInclude, + TotalInclude: tt.args.totalInclude, + } + d.parseContainerStats(tt.args.stat, &acc, tt.args.tags, tt.args.id, tt.args.daemonOSType) + + actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool { + return choice.Contains(m.Name(), + []string{"docker_container_cpu", "docker_container_net", "docker_container_blkio"}) + }) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.OnlyTags(), testutil.SortMetrics()) + }) + } +} + +func TestDocker_Init(t *testing.T) { + type fields struct { + PerDevice bool + PerDeviceInclude []string + Total bool + TotalInclude []string + } + tests := []struct { + name string + fields fields + wantErr bool + wantPerDeviceInclude []string + wantTotalInclude []string + }{ + { + "Unsupported perdevice_include setting", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"nonExistentClass"}, + Total: false, + TotalInclude: []string{"cpu"}, + }, + true, + []string{}, + []string{}, + }, + { + "Unsupported total_include setting", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"cpu"}, + Total: false, + TotalInclude: []string{"nonExistentClass"}, + }, + true, + []string{}, + []string{}, + }, + { + "PerDevice true adds network and blkio", + fields{ + PerDevice: true, + PerDeviceInclude: []string{"cpu"}, + Total: true, + TotalInclude: []string{"cpu"}, + }, + false, + []string{"cpu", "network", "blkio"}, + []string{"cpu"}, + }, + { + "Total false removes network and blkio", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"cpu"}, + Total: false, + TotalInclude: []string{"cpu", "network", "blkio"}, + }, + false, + []string{"cpu"}, + []string{"cpu"}, + }, + } + for 
_, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Docker{ + Log: testutil.Logger{}, + PerDevice: tt.fields.PerDevice, + PerDeviceInclude: tt.fields.PerDeviceInclude, + Total: tt.fields.Total, + TotalInclude: tt.fields.TotalInclude, + } + err := d.Init() + if (err != nil) != tt.wantErr { + t.Errorf("Init() error = %v, wantErr %v", err, tt.wantErr) + } + if err == nil { + if !reflect.DeepEqual(d.PerDeviceInclude, tt.wantPerDeviceInclude) { + t.Errorf("Perdevice include: got '%v', want '%v'", d.PerDeviceInclude, tt.wantPerDeviceInclude) + } + + if !reflect.DeepEqual(d.TotalInclude, tt.wantTotalInclude) { + t.Errorf("Total include: got '%v', want '%v'", d.TotalInclude, tt.wantTotalInclude) + } + } + }) + } } diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index bde0bd312c788..826f34f6703d4 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -2,7 +2,7 @@ package docker import ( "fmt" - "io/ioutil" + "io" "strings" "time" @@ -344,7 +344,7 @@ func containerStats(s string) types.ContainerStats { }, "read": "2016-02-24T11:42:27.472459608-05:00" }`, name) - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } @@ -488,7 +488,7 @@ func containerStatsWindows() types.ContainerStats { }, "name":"/gt_test_iis", }` - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go index 93ea2f2196baf..982f131d6d8d3 100644 --- a/plugins/inputs/docker/stats_helpers.go +++ b/plugins/inputs/docker/stats_helpers.go @@ -1,4 +1,4 @@ -// Helper functions copied from +// Package docker contains a few helper functions copied from // https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go package docker diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 27462ec5a66e7..622f9924e4236 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -15,9 +15,10 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/stdcopy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -64,11 +65,6 @@ var sampleConfig = ` const ( defaultEndpoint = "unix:///var/run/docker.sock" - - // Maximum bytes of a log line before it will be split, size is mirroring - // docker code: - // https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21 - maxLineBytes = 16 * 1024 ) var ( @@ -78,16 +74,16 @@ var ( ) type DockerLogs struct { - Endpoint string `toml:"endpoint"` - FromBeginning bool `toml:"from_beginning"` - Timeout internal.Duration `toml:"timeout"` - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` - ContainerInclude []string `toml:"container_name_include"` - ContainerExclude []string `toml:"container_name_exclude"` - ContainerStateInclude []string `toml:"container_state_include"` - ContainerStateExclude []string `toml:"container_state_exclude"` - IncludeSourceTag bool 
`toml:"source_tag"` + Endpoint string `toml:"endpoint"` + FromBeginning bool `toml:"from_beginning"` + Timeout config.Duration `toml:"timeout"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + ContainerInclude []string `toml:"container_name_include"` + ContainerExclude []string `toml:"container_name_exclude"` + ContainerStateInclude []string `toml:"container_state_include"` + ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` tlsint.ClientConfig @@ -160,18 +156,16 @@ func (d *DockerLogs) Init() error { return nil } -func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error { +func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) { d.mu.Lock() defer d.mu.Unlock() d.containerList[containerID] = cancel - return nil } -func (d *DockerLogs) removeFromContainerList(containerID string) error { +func (d *DockerLogs) removeFromContainerList(containerID string) { d.mu.Lock() defer d.mu.Unlock() delete(d.containerList, containerID) - return nil } func (d *DockerLogs) containerInContainerList(containerID string) bool { @@ -181,13 +175,12 @@ func (d *DockerLogs) containerInContainerList(containerID string) bool { return ok } -func (d *DockerLogs) cancelTails() error { +func (d *DockerLogs) cancelTails() { d.mu.Lock() defer d.mu.Unlock() for _, cancel := range d.containerList { cancel() } - return nil } func (d *DockerLogs) matchedContainerName(names []string) string { @@ -207,7 +200,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { ctx := context.Background() acc.SetPrecision(time.Nanosecond) - ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout)) defer cancel() containers, err := d.client.ContainerList(ctx, d.opts) if err != nil { @@ -243,7 +236,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { } func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) { - ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout)) defer cancel() c, err := d.client.ContainerInspect(ctx, container.ID) if err != nil { @@ -308,16 +301,14 @@ func (d *DockerLogs) tailContainerLogs( // multiplexed. 
if hasTTY { return tailStream(acc, tags, container.ID, logReader, "tty") - } else { - return tailMultiplexed(acc, tags, container.ID, logReader) } + return tailMultiplexed(acc, tags, container.ID, logReader) } func parseLine(line []byte) (time.Time, string, error) { parts := bytes.SplitN(line, []byte(" "), 2) - switch len(parts) { - case 1: + if len(parts) == 1 { parts = append(parts, []byte("")) } @@ -407,8 +398,11 @@ func tailMultiplexed( }() _, err := stdcopy.StdCopy(outWriter, errWriter, src) + //nolint:errcheck,revive // we cannot do anything if the closing fails outWriter.Close() + //nolint:errcheck,revive // we cannot do anything if the closing fails errWriter.Close() + //nolint:errcheck,revive // we cannot do anything if the closing fails src.Close() wg.Wait() return err @@ -427,20 +421,20 @@ func (d *DockerLogs) Stop() { // Following few functions have been inherited from telegraf docker input plugin func (d *DockerLogs) createContainerFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + containerFilter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) if err != nil { return err } - d.containerFilter = filter + d.containerFilter = containerFilter return nil } func (d *DockerLogs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) if err != nil { return err } - d.labelFilter = filter + d.labelFilter = labelFilter return nil } @@ -448,18 +442,18 @@ func (d *DockerLogs) createContainerStateFilters() error { if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { d.ContainerStateInclude = []string{"running"} } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + stateFilter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) if err != nil { return err } - d.stateFilter = filter + d.stateFilter = stateFilter return nil } func init() { inputs.Add("docker_log", func() telegraf.Input { return &DockerLogs{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), Endpoint: defaultEndpoint, newEnvClient: NewEnvClient, newClient: NewClient, diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index c8903c9d8ec28..49a73ebe9f1bb 100644 --- a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/stdcopy" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -138,8 +138,8 @@ func Test(t *testing.T) { ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { var buf bytes.Buffer w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout) - w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout")) - return &Response{Reader: &buf}, nil + _, err := w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout")) + return &Response{Reader: &buf}, err }, }, expected: []telegraf.Metric{ @@ -165,7 +165,7 @@ func Test(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator plugin := &DockerLogs{ - Timeout: 
internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil }, containerList: make(map[string]context.CancelFunc), IncludeSourceTag: true, diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index 3b6129488dae3..9e44d99edbc07 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -14,6 +14,9 @@ the [upgrading steps][upgrading]. ## specify dovecot servers via an address:port list ## e.g. ## localhost:24242 + ## or as a UDS socket + ## e.g. + ## /var/run/dovecot/old-stats ## ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index 66282c43423b2..fbc3b79058187 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -48,13 +48,10 @@ var validQuery = map[string]bool{ func (d *Dovecot) SampleConfig() string { return sampleConfig } -const defaultPort = "24242" - // Reads stats from all configured servers. func (d *Dovecot) Gather(acc telegraf.Accumulator) error { if !validQuery[d.Type] { - return fmt.Errorf("Error: %s is not a valid query type\n", - d.Type) + return fmt.Errorf("error: %s is not a valid query type", d.Type) } if len(d.Servers) == 0 { @@ -81,19 +78,29 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { } func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error { - _, _, err := net.SplitHostPort(addr) - if err != nil { - return fmt.Errorf("%q on url %s", err.Error(), addr) + var proto string + + if strings.HasPrefix(addr, "/") { + proto = "unix" + } else { + proto = "tcp" + + _, _, err := net.SplitHostPort(addr) + if err != nil { + return fmt.Errorf("%q on url %s", err.Error(), addr) + } } - c, err := net.DialTimeout("tcp", addr, defaultTimeout) + c, err := net.DialTimeout(proto, addr, defaultTimeout) if err != nil { - return fmt.Errorf("enable to connect to dovecot server '%s': %s", addr, err) + return fmt.Errorf("unable to connect to dovecot server '%s': %s", addr, err) } defer c.Close() // Extend connection - c.SetDeadline(time.Now().Add(defaultTimeout)) + if err := c.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return fmt.Errorf("setting deadline failed for dovecot server '%s': %s", addr, err) + } msg := fmt.Sprintf("EXPORT\t%s", qtype) if len(filter) > 0 { @@ -101,17 +108,30 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri } msg += "\n" - c.Write([]byte(msg)) + if _, err := c.Write([]byte(msg)); err != nil { + return fmt.Errorf("writing message %q failed for dovecot server '%s': %s", msg, addr, err) + } var buf bytes.Buffer - io.Copy(&buf, c) + if _, err := io.Copy(&buf, c); err != nil { + // We need to accept the timeout here as reading from the connection will only terminate on EOF + // or on a timeout. As EOF for TCP connections will only be sent on connection closing, + // the only way to get the whole message is to wait for the timeout to happen. 
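+ // A non-timeout error, on the other hand, indicates a real failure and is reported to the caller.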
+ if nerr, ok := err.(net.Error); !ok || !nerr.Timeout() { + return fmt.Errorf("copying message failed for dovecot server '%s': %s", addr, err) + } + } - host, _, _ := net.SplitHostPort(addr) + var host string + if strings.HasPrefix(addr, "/") { + host = addr + } else { + host, _, _ = net.SplitHostPort(addr) + } return gatherStats(&buf, acc, host, qtype) } func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error { - lines := strings.Split(buf.String(), "\n") head := strings.Split(lines[0], "\t") vals := lines[1:] @@ -170,13 +190,11 @@ func splitSec(tm string) (sec int64, msec int64) { } func timeParser(tm string) time.Time { - sec, msec := splitSec(tm) return time.Unix(sec, msec) } func secParser(tm string) float64 { - sec, msec := splitSec(tm) return float64(sec) + (float64(msec) / 1000000.0) } diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index c801d4f0ca5f7..f9ce76de947d6 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -1,7 +1,12 @@ package dovecot import ( + "bufio" "bytes" + "io" + "net" + "net/textproto" + "os" "testing" "time" @@ -9,8 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestDovecot(t *testing.T) { - +func TestDovecotIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -43,11 +47,49 @@ func TestDovecot(t *testing.T) { var acc testutil.Accumulator + // Test type=global server=unix + addr := "/tmp/socket" + wait := make(chan int) + go func() { + defer close(wait) + + la, err := net.ResolveUnixAddr("unix", addr) + require.NoError(t, err) + + l, err := net.ListenUnix("unix", la) + require.NoError(t, err) + defer l.Close() + defer os.Remove(addr) + + wait <- 0 + conn, err := l.Accept() + require.NoError(t, err) + defer conn.Close() + + readertp := textproto.NewReader(bufio.NewReader(conn)) + _, err = readertp.ReadLine() + require.NoError(t, err) + + buf := bytes.NewBufferString(sampleGlobal) + _, err = io.Copy(conn, buf) + require.NoError(t, err) + }() + + // Wait for server to start + <-wait + + d := &Dovecot{Servers: []string{addr}, Type: "global"} + err := d.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{"server": addr, "type": "global"} + acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) + // Test type=global - tags := map[string]string{"server": "dovecot.test", "type": "global"} + tags = map[string]string{"server": "dovecot.test", "type": "global"} buf := bytes.NewBufferString(sampleGlobal) - err := gatherStats(buf, &acc, "dovecot.test", "global") + err = gatherStats(buf, &acc, "dovecot.test", "global") require.NoError(t, err) acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) @@ -63,7 +105,7 @@ func TestDovecot(t *testing.T) { // Test type=ip tags = map[string]string{"server": "dovecot.test", "type": "ip", "ip": "192.168.0.100"} - buf = bytes.NewBufferString(sampleIp) + buf = bytes.NewBufferString(sampleIP) err = gatherStats(buf, &acc, "dovecot.test", "ip") require.NoError(t, err) @@ -103,7 +145,6 @@ func TestDovecot(t *testing.T) { require.NoError(t, err) acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) - } const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits @@ -112,7 
+153,7 @@ const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connec const sampleDomain = `domain reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits domain.test 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080` -const sampleIp = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits +const sampleIP = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits 192.168.0.100 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080` const sampleUser = `user reset_timestamp last_update num_logins num_cmds user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md new file mode 100644 index 0000000000000..00398760d2e9d --- /dev/null +++ b/plugins/inputs/dpdk/README.md @@ -0,0 +1,200 @@ +# Data Plane Development Kit (DPDK) Input Plugin +The `dpdk` plugin collects metrics exposed by applications built with the [Data Plane Development Kit](https://www.dpdk.org/), +an extensive set of open source libraries designed for accelerating packet processing workloads. + +DPDK provides APIs for exposing various statistics from the devices used by DPDK applications, as well as +KPI metrics directly from applications. Device statistics include, for example, common statistics available across NICs, +such as received and sent packets or bytes. In addition to these generic statistics, an extended statistics API +is available that provides more detailed, driver-specific metrics that are not available as generic statistics. + +[DPDK Release 20.05](https://doc.dpdk.org/guides/rel_notes/release_20_05.html) introduced an updated telemetry interface +that enables DPDK libraries and applications to provide their telemetry. This is referred to as the `v2` version of this +socket-based telemetry interface. This release enabled, for example, reading driver-specific extended stats (`/ethdev/xstats`) +via this new interface. + +[DPDK Release 20.11](https://doc.dpdk.org/guides/rel_notes/release_20_11.html) introduced reading common statistics +(`/ethdev/stats`) via the `v2` interface, in addition to the existing extended statistics (`/ethdev/xstats`). 
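+
+For orientation, here is a minimal sketch of the exchange this socket-based interface uses. It is an illustration
+only, not part of the plugin; it assumes a DPDK application is listening on the default socket path, and the version
+and PID shown in the comment are made-up examples:
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+)
+
+func main() {
+	// The v2 telemetry interface is a SOCK_SEQPACKET unix socket.
+	conn, err := net.Dial("unixpacket", "/var/run/dpdk/rte/dpdk_telemetry.v2")
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close()
+
+	buf := make([]byte, 16384)
+
+	// The first read yields an init message such as
+	// {"version": "DPDK 20.11.0", "pid": 1234, "max_output_len": 16384}.
+	n, err := conn.Read(buf)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("init: %s\n", buf[:n])
+
+	// Commands are plain strings; parameters, if any, follow a comma.
+	if _, err := conn.Write([]byte("/ethdev/stats,0")); err != nil {
+		panic(err)
+	}
+	n, err = conn.Read(buf)
+	if err != nil {
+		panic(err)
+	}
+	// The response is a JSON object keyed by the command, e.g. {"/ethdev/stats": {...}}.
+	fmt.Printf("response: %s\n", buf[:n])
+}
+```
+The plugin's connector (see `dpdk_connector.go` below) performs essentially this handshake-then-query sequence,
+with timeout and reconnection handling on top.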
+ +Example usage of the `v2` telemetry interface can be found in the [Telemetry User Guide](https://doc.dpdk.org/guides/howto/telemetry.html). +A variety of [DPDK Sample Applications](https://doc.dpdk.org/guides/sample_app_ug/index.html) is also available for users +to discover and test the capabilities of DPDK libraries and to explore the exposed metrics. + +> **DPDK Version Info:** This plugin uses this `v2` interface to read telemetry data from applications built with +> `DPDK version >= 20.05`. The default configuration includes reading common statistics from `/ethdev/stats`, which is +> available from `DPDK version >= 20.11`. When using `DPDK 20.05 <= version < DPDK 20.11` it is recommended to disable +> querying `/ethdev/stats` by setting the corresponding `exclude_commands` configuration option. + +> **NOTE:** Since DPDK will most likely run with root privileges, the socket telemetry interface exposed by DPDK +> will also require root access. This means that either access permissions have to be adjusted for the socket telemetry +> interface to allow Telegraf to access it, or Telegraf should run with root privileges. + +## Configuration +This plugin offers multiple configuration options; please review the examples below for additional usage information. +```toml +# Reads metrics from DPDK applications using v2 telemetry interface. +[[inputs.dpdk]] + ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. + # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" + + ## Duration that defines how long the connected socket client will wait for a response before terminating connection. + ## This includes both writing to and reading from socket. Since it's local socket access + ## to a fast packet processing application, the timeout should be sufficient for most users. + ## Setting the value to 0 disables the timeout (not recommended) + # socket_access_timeout = "200ms" + + ## Enables telemetry data collection for selected device types. + ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). + ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). + # device_types = ["ethdev"] + + ## List of custom, application-specific telemetry commands to query + ## The list of available commands depend on the application deployed. Applications can register their own commands + ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## additional_commands = ["/l3fwd-power/stats"] + # additional_commands = [] + + ## Allows turning off collecting data for individual "ethdev" commands. + ## Remove "/ethdev/link_status" from list to start getting link status metrics. + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. 
+ ## [inputs.dpdk.tags] + ## dpdk_instance = "my-fwd-app" +``` + +### Example: Minimal Configuration for NIC metrics +This configuration allows getting metrics for all devices reported via the `/ethdev/list` command: +* `/ethdev/stats` - basic device statistics (since `DPDK 20.11`) +* `/ethdev/xstats` - extended device statistics +* `/ethdev/link_status` - up/down link status +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] +``` +Since this configuration will query `/ethdev/link_status`, it's recommended to increase the timeout to `socket_access_timeout = "10s"`. + +The [plugin collection interval](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) +should be adjusted accordingly (e.g. `interval = "30s"`). + +### Example: Excluding NIC link status from being collected +Depending on the underlying implementation, checking link status may take more time to complete. +This configuration can be used to exclude this telemetry command to allow a faster response for metrics. +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] +``` +A separate plugin instance with higher timeout settings can be used to get `/ethdev/link_status` independently. +Consult the [Independent NIC link status configuration](#example-independent-nic-link-status-configuration) +and [Getting metrics from multiple DPDK instances running on the same host](#example-getting-metrics-from-multiple-dpdk-instances-running-on-the-same-host) +examples for further details. + +### Example: Independent NIC link status configuration +This configuration allows getting `/ethdev/link_status` using a separate configuration, with a higher timeout. +```toml +[[inputs.dpdk]] + interval = "30s" + socket_access_timeout = "10s" + device_types = ["ethdev"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/stats", "/ethdev/xstats"] +``` + +### Example: Getting application-specific metrics +This configuration allows reading custom metrics exposed by applications. The example telemetry command was obtained from the +[L3 Forwarding with Power Management Sample Application](https://doc.dpdk.org/guides/sample_app_ug/l3_forward_power_man.html). +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] +``` +Command entries specified in `additional_commands` should match the DPDK command format: +* Command entry format: either `command` or `command,params` for commands that expect parameters, where a comma (`,`) separates the command from the params. +* Command entry length (command with params) should be `< 1024` characters. +* Command length (without params) should be `< 56` characters. +* Commands have to start with `/`. + +Providing invalid commands will prevent the plugin from starting. Additional commands allow duplicates, but they +will be removed during execution, so each command will be executed only once during each metric gathering interval. + +### Example: Getting metrics from multiple DPDK instances running on the same host +This configuration allows getting metrics from two separate applications exposing their telemetry interfaces +via separate sockets. For each plugin instance, a unique tag set via `[inputs.dpdk.tags]` allows distinguishing between them. 
+```toml +# Instance #1 - L3 Forwarding with Power Management Application +[[inputs.dpdk]] + socket_path = "/var/run/dpdk/rte/l3fwd-power_telemetry.v2" + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + [inputs.dpdk.tags] + dpdk_instance = "l3fwd-power" + +# Instance #2 - L2 Forwarding with Intel Cache Allocation Technology (CAT) Application +[[inputs.dpdk]] + socket_path = "/var/run/dpdk/rte/l2fwd-cat_telemetry.v2" + device_types = ["ethdev"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + [inputs.dpdk.tags] + dpdk_instance = "l2fwd-cat" +``` +This utilizes Telegraf's standard capability of [adding custom tags](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) +to an input plugin's measurements. + +## Metrics +The DPDK socket accepts `command,params` requests and returns metric data in JSON format. All metrics from the DPDK socket +are flattened using [Telegraf's JSON Flattener](../../parsers/json/README.md) and exposed as fields. +If a DPDK response contains no information (is empty or null), it will be discarded. + +> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework, the JSON response from DPDK +> may contain various sets of metrics. While metrics from `/ethdev/stats` should be most stable, the `/ethdev/xstats` +> response may contain driver-specific metrics (depending on DPDK application configuration). Application-specific commands +> like `/l3fwd-power/stats` can return their own specific set of metrics. + +## Example output +The output consists of the plugin name (`dpdk`) and a set of tags that identify the querying hierarchy: +``` +dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fields] [timestamp] +``` + +| Tag | Description | +|-----|-------------| +| `host` | hostname of the machine (consult [Telegraf Agent configuration](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#agent) for additional details) | +| `dpdk_instance` | custom tag from `[inputs.dpdk.tags]` (optional) | +| `command` | executed command (without params) | +| `params` | command parameter, e.g. for `/ethdev/stats` it is the ID of the NIC as exposed by `/ethdev/list`.
For a DPDK app that uses 2 NICs, metrics will be output with e.g. `params=0` and `params=1`. | + +When running the plugin configuration below... +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + [inputs.dpdk.tags] + dpdk_instance = "l3fwd-power" +``` + +...the expected output for a `dpdk` plugin instance running on a host named `dpdk-host` is: +``` +dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 +dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 +dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 
out_octets_encrypted=0,rx_fcoe_mbuf_allocation_errors=0,tx_q1packets=0,rx_priority0_xoff_packets=0,rx_priority7_xoff_packets=0,rx_errors=0,mac_remote_errors=0,in_pkts_invalid=0,tx_priority3_xoff_packets=0,tx_errors=0,rx_fcoe_bytes=0,rx_flow_control_xon_packets=0,rx_priority4_xoff_packets=0,tx_priority2_xoff_packets=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_management_packets=0,rx_priority7_dropped=0,rx_priority4_dropped=0,in_pkts_unchecked=0,rx_error_bytes=0,rx_size_256_to_511_packets=0,tx_priority4_xoff_packets=0,rx_priority6_xon_packets=0,tx_priority4_xon_to_xoff_packets=0,in_pkts_delayed=0,rx_priority0_mbuf_allocation_errors=0,out_octets_protected=0,tx_priority7_xon_to_xoff_packets=0,tx_priority1_xon_to_xoff_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_priority6_xon_to_xoff_packets=0,flow_director_filter_add_errors=0,rx_total_packets=99,rx_crc_errors=0,flow_director_filter_remove_errors=0,rx_missed_errors=0,tx_size_64_packets=0,rx_priority3_dropped=0,flow_director_matched_filters=0,tx_priority2_xon_to_xoff_packets=0,rx_priority1_xon_packets=0,rx_size_65_to_127_packets=99,rx_fragment_errors=0,in_pkts_notusingsa=0,rx_q0bytes=7162,rx_fcoe_dropped=0,rx_priority1_dropped=0,rx_fcoe_packets=0,rx_priority5_xoff_packets=0,out_pkts_protected=0,tx_total_packets=0,rx_priority2_dropped=0,in_pkts_late=0,tx_q1bytes=0,in_pkts_badtag=0,rx_multicast_packets=99,rx_priority6_xoff_packets=0,tx_flow_control_xoff_packets=0,rx_flow_control_xoff_packets=0,rx_priority0_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,rx_priority7_mbuf_allocation_errors=0,tx_priority0_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,tx_q0packets=0,tx_xoff_packets=0,rx_size_512_to_1023_packets=0,rx_priority3_xon_packets=0,rx_q0errors=0,rx_oversize_errors=0,tx_priority4_xon_packets=0,tx_priority5_xoff_packets=0,rx_priority5_xon_packets=0,rx_total_missed_packets=0,rx_priority4_mbuf_allocation_errors=0,tx_priority1_xon_packets=0,tx_management_packets=0,rx_priority5_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,rx_undersize_errors=0,tx_priority1_xoff_packets=0,rx_q0packets=99,tx_q2packets=0,tx_priority6_xon_packets=0,rx_good_packets=99,tx_priority5_xon_packets=0,tx_size_256_to_511_packets=0,rx_priority6_dropped=0,rx_broadcast_packets=0,tx_size_512_to_1023_packets=0,tx_priority3_xon_to_xoff_packets=0,in_pkts_unknownsci=0,in_octets_validated=0,tx_priority6_xoff_packets=0,tx_priority7_xoff_packets=0,rx_jabber_errors=0,tx_priority7_xon_packets=0,tx_priority0_xon_packets=0,in_pkts_unusedsa=0,tx_priority0_xoff_packets=0,mac_local_errors=33,rx_total_bytes=7162,in_pkts_notvalid=0,rx_length_errors=0,in_octets_decrypted=0,rx_size_128_to_255_packets=0,rx_good_bytes=7162,tx_size_65_to_127_packets=0,rx_mac_short_packet_dropped=0,tx_size_1024_to_max_packets=0,rx_priority2_mbuf_allocation_errors=0,flow_director_added_filters=0,tx_multicast_packets=0,rx_fcoe_crc_errors=0,rx_priority1_xoff_packets=0,flow_director_missed_filters=0,rx_xon_packets=0,tx_size_128_to_255_packets=0,out_pkts_encrypted=0,rx_priority4_xon_packets=0,rx_priority0_dropped=0,rx_size_1024_to_max_packets=0,tx_good_bytes=0,rx_management_dropped=0,rx_mbuf_allocation_errors=0,tx_xon_packets=0,rx_priority3_xoff_packets=0,tx_good_packets=0,tx_fcoe_bytes=0,rx_priority6_mbuf_allocation_errors=0,rx_priority2_xon_packets=0,tx_broadcast_packets=0,tx_q2bytes=0,rx_priority7_xon_packets=0,out_pkts_untagged=0,rx_priority2_xoff_packets=0,rx_priority1_mbuf_allocation_errors=0,tx_q0bytes=0,rx_size_64_packets=0,rx_priority5_dropped
=0,tx_priority2_xon_packets=0,in_pkts_nosci=0,flow_director_removed_filters=0,in_pkts_ok=0,rx_l3_l4_xsum_error=0,rx_priority3_mbuf_allocation_errors=0,tx_priority3_xon_packets=0 1606310780000000000 +dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 tx_priority5_xoff_packets=0,in_pkts_unknownsci=0,tx_q0packets=0,tx_total_packets=0,rx_crc_errors=0,rx_priority4_xoff_packets=0,rx_priority5_dropped=0,tx_size_65_to_127_packets=0,rx_good_packets=98,tx_priority6_xoff_packets=0,tx_fcoe_bytes=0,out_octets_protected=0,out_pkts_encrypted=0,rx_priority1_xon_packets=0,tx_size_128_to_255_packets=0,rx_flow_control_xoff_packets=0,rx_priority7_xoff_packets=0,tx_priority0_xon_to_xoff_packets=0,rx_broadcast_packets=0,tx_priority1_xon_packets=0,rx_xon_packets=0,rx_fragment_errors=0,tx_flow_control_xoff_packets=0,tx_q0bytes=0,out_pkts_untagged=0,rx_priority4_xon_packets=0,tx_priority5_xon_packets=0,rx_priority1_xoff_packets=0,rx_good_bytes=7092,rx_priority4_mbuf_allocation_errors=0,in_octets_decrypted=0,tx_priority2_xon_to_xoff_packets=0,rx_priority3_dropped=0,tx_multicast_packets=0,mac_local_errors=33,in_pkts_ok=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_q0errors=0,flow_director_added_filters=0,rx_size_256_to_511_packets=0,rx_priority3_xon_packets=0,rx_l3_l4_xsum_error=0,rx_priority6_dropped=0,in_pkts_notvalid=0,rx_size_64_packets=0,tx_management_packets=0,rx_length_errors=0,tx_priority7_xon_to_xoff_packets=0,rx_mbuf_allocation_errors=0,rx_missed_errors=0,rx_priority1_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,tx_priority3_xoff_packets=0,in_pkts_delayed=0,tx_errors=0,rx_size_512_to_1023_packets=0,tx_priority4_xon_packets=0,rx_q0bytes=7092,in_pkts_unchecked=0,tx_size_512_to_1023_packets=0,rx_fcoe_packets=0,in_pkts_nosci=0,rx_priority6_mbuf_allocation_errors=0,rx_priority1_dropped=0,tx_q2packets=0,rx_priority7_dropped=0,tx_size_1024_to_max_packets=0,rx_management_packets=0,rx_multicast_packets=98,rx_total_bytes=7092,mac_remote_errors=0,tx_priority3_xon_packets=0,rx_priority2_mbuf_allocation_errors=0,rx_priority5_mbuf_allocation_errors=0,tx_q2bytes=0,rx_size_128_to_255_packets=0,in_pkts_badtag=0,out_pkts_protected=0,rx_management_dropped=0,rx_fcoe_bytes=0,flow_director_removed_filters=0,tx_priority2_xoff_packets=0,rx_fcoe_crc_errors=0,rx_priority0_mbuf_allocation_errors=0,rx_priority0_xon_packets=0,rx_fcoe_dropped=0,tx_priority1_xon_to_xoff_packets=0,rx_size_65_to_127_packets=98,rx_q0packets=98,tx_priority0_xoff_packets=0,rx_priority6_xon_packets=0,rx_total_packets=98,rx_undersize_errors=0,flow_director_missed_filters=0,rx_jabber_errors=0,in_pkts_invalid=0,in_pkts_late=0,rx_priority5_xon_packets=0,tx_priority4_xoff_packets=0,out_octets_encrypted=0,tx_q1packets=0,rx_priority5_xoff_packets=0,rx_priority6_xoff_packets=0,rx_errors=0,in_octets_validated=0,rx_priority3_xoff_packets=0,tx_priority4_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,rx_priority0_dropped=0,flow_director_filter_add_errors=0,tx_q1bytes=0,tx_priority6_xon_to_xoff_packets=0,flow_director_matched_filters=0,tx_priority2_xon_packets=0,rx_fcoe_mbuf_allocation_errors=0,rx_priority2_xoff_packets=0,tx_priority7_xoff_packets=0,rx_priority0_xoff_packets=0,rx_oversize_errors=0,in_pkts_notusingsa=0,tx_size_64_packets=0,rx_size_1024_to_max_packets=0,tx_priority6_xon_packets=0,rx_priority2_dropped=0,rx_priority4_dropped=0,rx_priority7_mbuf_allocation_errors=0,rx_flow_control_xon_packets=0,tx_good_bytes=0,tx_priority3_xon_to_xoff_packets=0,rx_total_missed_packets=0,rx_error
_bytes=0,tx_priority7_xon_packets=0,rx_mac_short_packet_dropped=0,tx_priority1_xoff_packets=0,tx_good_packets=0,tx_broadcast_packets=0,tx_xon_packets=0,in_pkts_unusedsa=0,rx_priority2_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,flow_director_filter_remove_errors=0,rx_priority3_mbuf_allocation_errors=0,tx_priority0_xon_packets=0,rx_priority7_xon_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_xoff_packets=0,tx_size_256_to_511_packets=0 1606310780000000000 +dpdk,command=/ethdev/link_status,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 status="UP",speed=10000,duplex="full-duplex" 1606310780000000000 +dpdk,command=/ethdev/link_status,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 status="UP",speed=10000,duplex="full-duplex" 1606310780000000000 +dpdk,command=/l3fwd-power/stats,dpdk_instance=l3fwd-power,host=dpdk-host empty_poll=49506395979901,full_poll=0,busy_percent=0 1606310780000000000 +``` diff --git a/plugins/inputs/dpdk/dpdk.go b/plugins/inputs/dpdk/dpdk.go new file mode 100644 index 0000000000000..261784942232c --- /dev/null +++ b/plugins/inputs/dpdk/dpdk.go @@ -0,0 +1,264 @@ +//go:build linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +const ( + description = "Reads metrics from DPDK applications using v2 telemetry interface." + sampleConfig = ` + ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. + # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" + + ## Duration that defines how long the connected socket client will wait for a response before terminating connection. + ## This includes both writing to and reading from socket. Since it's local socket access + ## to a fast packet processing application, the timeout should be sufficient for most users. + ## Setting the value to 0 disables the timeout (not recommended) + # socket_access_timeout = "200ms" + + ## Enables telemetry data collection for selected device types. + ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). + ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). + # device_types = ["ethdev"] + + ## List of custom, application-specific telemetry commands to query + ## The list of available commands depend on the application deployed. Applications can register their own commands + ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## additional_commands = ["/l3fwd-power/stats"] + # additional_commands = [] + + ## Allows turning off collecting data for individual "ethdev" commands. + ## Remove "/ethdev/link_status" from list to start getting link status metrics. + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. 
+ ## [inputs.dpdk.tags] + ## dpdk_instance = "my-fwd-app" +` + defaultPathToSocket = "/var/run/dpdk/rte/dpdk_telemetry.v2" + defaultAccessTimeout = config.Duration(200 * time.Millisecond) + maxCommandLength = 56 + maxCommandLengthWithParams = 1024 + pluginName = "dpdk" + ethdevListCommand = "/ethdev/list" + rawdevListCommand = "/rawdev/list" +) + +type dpdk struct { + SocketPath string `toml:"socket_path"` + AccessTimeout config.Duration `toml:"socket_access_timeout"` + DeviceTypes []string `toml:"device_types"` + EthdevConfig ethdevConfig `toml:"ethdev"` + AdditionalCommands []string `toml:"additional_commands"` + Log telegraf.Logger `toml:"-"` + + connector *dpdkConnector + rawdevCommands []string + ethdevCommands []string + ethdevExcludedCommandsFilter filter.Filter +} + +type ethdevConfig struct { + EthdevExcludeCommands []string `toml:"exclude_commands"` +} + +func init() { + inputs.Add(pluginName, func() telegraf.Input { + dpdk := &dpdk{ + // Setting it here (rather than in `Init()`) to distinguish between the "zero" value, + // the default value and not having a value in the config at all. + AccessTimeout: defaultAccessTimeout, + } + return dpdk + }) +} + +func (dpdk *dpdk) SampleConfig() string { + return sampleConfig +} + +func (dpdk *dpdk) Description() string { + return description +} + +// Performs validation of all parameters from configuration +func (dpdk *dpdk) Init() error { + if dpdk.SocketPath == "" { + dpdk.SocketPath = defaultPathToSocket + dpdk.Log.Debugf("using default '%v' path for socket_path", defaultPathToSocket) + } + + if dpdk.DeviceTypes == nil { + dpdk.DeviceTypes = []string{"ethdev"} + } + + var err error + if err = isSocket(dpdk.SocketPath); err != nil { + return err + } + + dpdk.rawdevCommands = []string{"/rawdev/xstats"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats", "/ethdev/link_status"} + + if err = dpdk.validateCommands(); err != nil { + return err + } + + if dpdk.AccessTimeout < 0 { + return fmt.Errorf("socket_access_timeout should be a positive number or equal to 0 (to disable timeouts)") + } + + if len(dpdk.AdditionalCommands) == 0 && len(dpdk.DeviceTypes) == 0 { + return fmt.Errorf("plugin was configured with nothing to read") + } + + dpdk.ethdevExcludedCommandsFilter, err = filter.Compile(dpdk.EthdevConfig.EthdevExcludeCommands) + if err != nil { + return fmt.Errorf("error occurred during filter preparation for ethdev excluded commands - %v", err) + } + + dpdk.connector = newDpdkConnector(dpdk.SocketPath, dpdk.AccessTimeout) + initMessage, err := dpdk.connector.connect() + if initMessage != nil { + dpdk.Log.Debugf("Successfully connected to %v running as process with PID %v with len %v", + initMessage.Version, initMessage.Pid, initMessage.MaxOutputLen) + } + return err +} + +// Checks that user-supplied commands are unique and match the DPDK command format +func (dpdk *dpdk) validateCommands() error { + dpdk.AdditionalCommands = uniqueValues(dpdk.AdditionalCommands) + + for _, commandWithParams := range dpdk.AdditionalCommands { + if len(commandWithParams) == 0 { + return fmt.Errorf("got empty command") + } + + if commandWithParams[0] != '/' { + return fmt.Errorf("'%v' command should start with '/'", commandWithParams) + } + + if commandWithoutParams := stripParams(commandWithParams); len(commandWithoutParams) >= maxCommandLength { + return fmt.Errorf("'%v' command is too long. 
It shall be less than %v characters", commandWithoutParams, maxCommandLength) + } + + if len(commandWithParams) >= maxCommandLengthWithParams { + return fmt.Errorf("command with parameters '%v' shall be less than %v characters", commandWithParams, maxCommandLengthWithParams) + } + } + + return nil +} + +// Gathers all unique commands and processes each command sequentially +// Parallel processing could be achieved by running several instances of this plugin with different settings +func (dpdk *dpdk) Gather(acc telegraf.Accumulator) error { + // This needs to be done during every `Gather(...)`, because DPDK can be restarted between consecutive + // `Gather(...)` cycles, which can cause it to expose a different set of metrics. + commands := dpdk.gatherCommands(acc) + + for _, command := range commands { + dpdk.processCommand(acc, command) + } + + return nil +} + +// Gathers all unique commands +func (dpdk *dpdk) gatherCommands(acc telegraf.Accumulator) []string { + var commands []string + if choice.Contains("ethdev", dpdk.DeviceTypes) { + ethdevCommands := removeSubset(dpdk.ethdevCommands, dpdk.ethdevExcludedCommandsFilter) + ethdevCommands, err := dpdk.appendCommandsWithParamsFromList(ethdevListCommand, ethdevCommands) + if err != nil { + acc.AddError(fmt.Errorf("error occurred during fetching of %v params - %v", ethdevListCommand, err)) + } + + commands = append(commands, ethdevCommands...) + } + + if choice.Contains("rawdev", dpdk.DeviceTypes) { + rawdevCommands, err := dpdk.appendCommandsWithParamsFromList(rawdevListCommand, dpdk.rawdevCommands) + if err != nil { + acc.AddError(fmt.Errorf("error occurred during fetching of %v params - %v", rawdevListCommand, err)) + } + + commands = append(commands, rawdevCommands...) + } + + commands = append(commands, dpdk.AdditionalCommands...) 
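+ // The same command may appear both as a device-derived command and in additional_commands, so deduplicate before execution.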
+ return uniqueValues(commands) +} + +// Fetches all identifiers of devices and then creates all possible combinations of commands for each device +func (dpdk *dpdk) appendCommandsWithParamsFromList(listCommand string, commands []string) ([]string, error) { + response, err := dpdk.connector.getCommandResponse(listCommand) + if err != nil { + return nil, err + } + + params, err := jsonToArray(response, listCommand) + if err != nil { + return nil, err + } + + result := make([]string, 0, len(commands)*len(params)) + for _, command := range commands { + for _, param := range params { + result = append(result, commandWithParams(command, param)) + } + } + + return result, nil +} + +// Executes command, parses response and creates/writes metric from response +func (dpdk *dpdk) processCommand(acc telegraf.Accumulator, commandWithParams string) { + buf, err := dpdk.connector.getCommandResponse(commandWithParams) + if err != nil { + acc.AddError(err) + return + } + + var parsedResponse map[string]interface{} + err = json.Unmarshal(buf, &parsedResponse) + if err != nil { + acc.AddError(fmt.Errorf("failed to unmarshal json response from %v command - %v", commandWithParams, err)) + return + } + + command := stripParams(commandWithParams) + value := parsedResponse[command] + if isEmpty(value) { + acc.AddError(fmt.Errorf("got empty json on '%v' command", commandWithParams)) + return + } + + jf := jsonparser.JSONFlattener{} + err = jf.FullFlattenJSON("", value, true, true) + if err != nil { + acc.AddError(fmt.Errorf("failed to flatten response - %v", err)) + return + } + + acc.AddFields(pluginName, jf.Fields, map[string]string{ + "command": command, + "params": getParams(commandWithParams), + }) +} diff --git a/plugins/inputs/dpdk/dpdk_connector.go b/plugins/inputs/dpdk/dpdk_connector.go new file mode 100644 index 0000000000000..9cd9c81c4362b --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_connector.go @@ -0,0 +1,163 @@ +//go:build linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "net" + "time" + + "github.com/influxdata/telegraf/config" +) + +const maxInitMessageLength = 1024 + +type initMessage struct { + Version string `json:"version"` + Pid int `json:"pid"` + MaxOutputLen uint32 `json:"max_output_len"` +} + +type dpdkConnector struct { + pathToSocket string + maxOutputLen uint32 + messageShowed bool + accessTimeout time.Duration + connection net.Conn +} + +func newDpdkConnector(pathToSocket string, accessTimeout config.Duration) *dpdkConnector { + return &dpdkConnector{ + pathToSocket: pathToSocket, + messageShowed: false, + accessTimeout: time.Duration(accessTimeout), + } +} + +// Connects to the socket +// Since DPDK exposes a local unix socket, dialing it instantly returns either an error or a connection, so there's no need to set a timeout for it +func (conn *dpdkConnector) connect() (*initMessage, error) { + connection, err := net.Dial("unixpacket", conn.pathToSocket) + if err != nil { + return nil, fmt.Errorf("failed to connect to the socket - %v", err) + } + + conn.connection = connection + result, err := conn.readMaxOutputLen() + if err != nil { + if closeErr := conn.tryClose(); closeErr != nil { + return nil, fmt.Errorf("%v and failed to close connection - %v", err, closeErr) + } + return nil, err + } + + return result, nil +} + +// Executes command using provided connection and returns response +// If an error (such as a timeout) occurs, then the connection is discarded and recreated +// because otherwise the behaviour of the connection is undefined (e.g. 
it could return the result of a timed-out command instead of the latest one) +func (conn *dpdkConnector) getCommandResponse(fullCommand string) ([]byte, error) { + connection, err := conn.getConnection() + if err != nil { + return nil, fmt.Errorf("failed to get connection to execute %v command - %v", fullCommand, err) + } + + err = conn.setTimeout() + if err != nil { + return nil, fmt.Errorf("failed to set timeout for %v command - %v", fullCommand, err) + } + + _, err = connection.Write([]byte(fullCommand)) + if err != nil { + if closeErr := conn.tryClose(); closeErr != nil { + return nil, fmt.Errorf("failed to send '%v' command - %v and failed to close connection - %v", + fullCommand, err, closeErr) + } + return nil, fmt.Errorf("failed to send '%v' command - %v", fullCommand, err) + } + + buf := make([]byte, conn.maxOutputLen) + messageLength, err := connection.Read(buf) + if err != nil { + if closeErr := conn.tryClose(); closeErr != nil { + return nil, fmt.Errorf("failed to read response of '%v' command - %v and failed to close connection - %v", + fullCommand, err, closeErr) + } + return nil, fmt.Errorf("failed to read response of '%v' command - %v", fullCommand, err) + } + + if messageLength == 0 { + return nil, fmt.Errorf("got empty response during execution of '%v' command", fullCommand) + } + return buf[:messageLength], nil +} + +func (conn *dpdkConnector) tryClose() error { + if conn.connection == nil { + return nil + } + + err := conn.connection.Close() + conn.connection = nil + if err != nil { + return err + } + return nil +} + +func (conn *dpdkConnector) setTimeout() error { + if conn.connection == nil { + return fmt.Errorf("connection had not been established before") + } + + if conn.accessTimeout == 0 { + return conn.connection.SetDeadline(time.Time{}) + } + return conn.connection.SetDeadline(time.Now().Add(conn.accessTimeout)) +} + +// Returns the connection; if the connection has not been created yet, tries to create it +func (conn *dpdkConnector) getConnection() (net.Conn, error) { + if conn.connection == nil { + _, err := conn.connect() + if err != nil { + return nil, err + } + } + return conn.connection, nil +} + +// Reads the InitMessage for a connection. It should be read for each connection, otherwise the InitMessage is returned as the response to the first command. 
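+// The "max_output_len" value from the init message also sizes the buffer used to read every subsequent command response.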
+func (conn *dpdkConnector) readMaxOutputLen() (*initMessage, error) { + buf := make([]byte, maxInitMessageLength) + err := conn.setTimeout() + if err != nil { + return nil, fmt.Errorf("failed to set timeout - %v", err) + } + + messageLength, err := conn.connection.Read(buf) + if err != nil { + return nil, fmt.Errorf("failed to read InitMessage - %v", err) + } + + var initMessage initMessage + err = json.Unmarshal(buf[:messageLength], &initMessage) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response - %v", err) + } + + if initMessage.MaxOutputLen == 0 { + return nil, fmt.Errorf("failed to read maxOutputLen information") + } + + if !conn.messageShowed { + conn.maxOutputLen = initMessage.MaxOutputLen + conn.messageShowed = true + return &initMessage, nil + } + + return nil, nil +} diff --git a/plugins/inputs/dpdk/dpdk_connector_test.go b/plugins/inputs/dpdk/dpdk_connector_test.go new file mode 100644 index 0000000000000..f5580417c3c67 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_connector_test.go @@ -0,0 +1,183 @@ +//go:build linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/dpdk/mocks" +) + +func Test_readMaxOutputLen(t *testing.T) { + t.Run("should return error if timeout occurred", func(t *testing.T) { + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Return(0, fmt.Errorf("timeout")) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err := connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "timeout") + }) + + t.Run("should pass and set maxOutputLen if provided with valid InitMessage", func(t *testing.T) { + maxOutputLen := uint32(4567) + initMessage := initMessage{ + Version: "DPDK test version", + Pid: 1234, + MaxOutputLen: maxOutputLen, + } + message, err := json.Marshal(initMessage) + require.NoError(t, err) + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err = connector.readMaxOutputLen() + + require.NoError(t, err) + require.Equal(t, maxOutputLen, connector.maxOutputLen) + }) + + t.Run("should fail if received invalid json", func(t *testing.T) { + message := `{notAJson}` + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err := connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "looking for beginning of object key string") + }) + + t.Run("should fail if received maxOutputLen equals to 0", func(t *testing.T) { + message, err := json.Marshal(initMessage{ + Version: "test", + Pid: 1, + MaxOutputLen: 0, + }) + require.NoError(t, err) + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err = connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to read 
maxOutputLen information") + }) +} + +func Test_connect(t *testing.T) { + t.Run("should pass if PathToSocket points to socket", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + connector: newDpdkConnector(pathToSocket, 0), + } + go simulateSocketResponse(socket, t) + + _, err := dpdk.connector.connect() + + require.NoError(t, err) + }) +} + +func Test_getCommandResponse(t *testing.T) { + command := "/" + response := "myResponseString" + + t.Run("should return proper buffer size and value if no error occurred", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, response, nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.NoError(t, err) + require.Equal(t, len(response), len(buf)) + require.Equal(t, response, string(buf)) + }) + + t.Run("should return error if failed to get connection handler", func(t *testing.T) { + _, dpdk, _ := prepareEnvironment() + dpdk.connector.connection = nil + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to get connection to execute / command") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if failed to set timeout duration", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("SetDeadline", mock.Anything).Return(fmt.Errorf("deadline error")) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "deadline error") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if timeout occurred during Write operation", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("Write", mock.Anything).Return(0, fmt.Errorf("write timeout")) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + mockConn.On("Close").Return(nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "write timeout") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if timeout occurred during Read operation", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, "", fmt.Errorf("read timeout")) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "read timeout") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if got empty response", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, "", nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Equal(t, 0, len(buf)) + require.Contains(t, err.Error(), "got empty response during execution of") + }) +} diff --git a/plugins/inputs/dpdk/dpdk_notlinux.go b/plugins/inputs/dpdk/dpdk_notlinux.go new file mode 100644 index 0000000000000..1831b1212ae78 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package dpdk diff --git a/plugins/inputs/dpdk/dpdk_test.go b/plugins/inputs/dpdk/dpdk_test.go new file mode 100644 index 0000000000000..41d2da3d07777 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_test.go @@ -0,0 +1,399 @@ +//go:build 
linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "net" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/inputs/dpdk/mocks" + "github.com/influxdata/telegraf/testutil" +) + +func Test_Init(t *testing.T) { + t.Run("when SocketPath field isn't set then it should be set to default value", func(t *testing.T) { + _, dpdk, _ := prepareEnvironment() + dpdk.SocketPath = "" + require.Equal(t, "", dpdk.SocketPath) + + _ = dpdk.Init() + + require.Equal(t, defaultPathToSocket, dpdk.SocketPath) + }) + + t.Run("when commands are in invalid format (doesn't start with '/') then error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + AdditionalCommands: []string{"invalid"}, + } + + err := dpdk.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "command should start with '/'") + }) + + t.Run("when all values are valid, then no error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + DeviceTypes: []string{"ethdev"}, + Log: testutil.Logger{}, + } + go simulateSocketResponse(socket, t) + + err := dpdk.Init() + + require.NoError(t, err) + }) + + t.Run("when device_types and additional_commands are empty, then error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + DeviceTypes: []string{}, + AdditionalCommands: []string{}, + Log: testutil.Logger{}, + } + + err := dpdk.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "plugin was configured with nothing to read") + }) +} + +func Test_validateCommands(t *testing.T) { + t.Run("when validating commands in correct format then no error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{"/test", "/help"}, + } + + err := dpdk.validateCommands() + + require.NoError(t, err) + }) + + t.Run("when validating command that doesn't begin with slash then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "commandWithoutSlash", + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "command should start with '/'") + }) + + t.Run("when validating long command (without parameters) then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/" + strings.Repeat("a", maxCommandLength), + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "command is too long") + }) + + t.Run("when validating long command (with params) then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/," + strings.Repeat("a", maxCommandLengthWithParams), + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "shall be less than 1024 characters") + }) + + t.Run("when validating empty command then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "", + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, 
err.Error(), "got empty command") + }) + + t.Run("when validating commands with duplicates then duplicates should be removed and no error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/test", + }, + } + require.Equal(t, 2, len(dpdk.AdditionalCommands)) + + err := dpdk.validateCommands() + + require.Equal(t, 1, len(dpdk.AdditionalCommands)) + require.NoError(t, err) + }) +} + +func Test_dpdkPluginDescriber(t *testing.T) { + dpdk := dpdk{} + t.Run("sampleConfig function should return value from constant", func(t *testing.T) { + require.Equal(t, sampleConfig, dpdk.SampleConfig()) + }) + + t.Run("description function should return value from constant", func(t *testing.T) { + require.Equal(t, description, dpdk.Description()) + }) +} + +func prepareEnvironment() (*mocks.Conn, dpdk, *testutil.Accumulator) { + mockConnection := &mocks.Conn{} + dpdk := dpdk{ + connector: &dpdkConnector{ + connection: mockConnection, + maxOutputLen: 1024, + accessTimeout: 2 * time.Second, + }, + Log: testutil.Logger{}, + } + mockAcc := &testutil.Accumulator{} + return mockConnection, dpdk, mockAcc +} + +func Test_processCommand(t *testing.T) { + t.Run("should pass if received valid response", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/": ["/", "/eal/app_params", "/eal/params", "/ethdev/link_status"]}` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("if received a non-JSON object then should return error", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `notAJson` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "invalid character") + }) + + t.Run("if failed to get command response then accumulator should contain error", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("Write", mock.Anything).Return(0, fmt.Errorf("deadline exceeded")) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + mockConn.On("Close").Return(nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "deadline exceeded") + }) + + t.Run("if response contains nil or empty value then error should be returned in accumulator", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/test": null}` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/test,param") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "got empty json on") + }) +} + +func Test_appendCommandsWithParams(t *testing.T) { + t.Run("when got valid data, then valid commands with params should be created", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/testendpoint": [1,123]}` + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/action1,1", "/action1,123", "/action2,1", "/action2,123"} + + result, err := dpdk.appendCommandsWithParamsFromList("/testendpoint", []string{"/action1", "/action2"}) + + require.NoError(t, err) + require.Equal(t, 4, len(result)) + 
require.ElementsMatch(t, result, expectedCommands)
+	})
+}
+
+func Test_getCommandsAndParamsCombinations(t *testing.T) {
+	t.Run("when 2 ethdev commands are enabled, then 2*numberOfIds new commands should be appended", func(t *testing.T) {
+		mockConn, dpdk, mockAcc := prepareEnvironment()
+		defer mockConn.AssertExpectations(t)
+		response := fmt.Sprintf(`{"%s": [1, 123]}`, ethdevListCommand)
+		simulateResponse(mockConn, response, nil)
+		expectedCommands := []string{"/ethdev/stats,1", "/ethdev/stats,123", "/ethdev/xstats,1", "/ethdev/xstats,123"}
+
+		dpdk.DeviceTypes = []string{"ethdev"}
+		dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
+		dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{})
+		dpdk.AdditionalCommands = []string{}
+		commands := dpdk.gatherCommands(mockAcc)
+
+		require.ElementsMatch(t, commands, expectedCommands)
+		require.Equal(t, 0, len(mockAcc.Errors))
+	})
+
+	t.Run("when 1 rawdev command is enabled, then numberOfIds new commands should be appended", func(t *testing.T) {
+		mockConn, dpdk, mockAcc := prepareEnvironment()
+		defer mockConn.AssertExpectations(t)
+		response := fmt.Sprintf(`{"%s": [1, 123]}`, rawdevListCommand)
+		simulateResponse(mockConn, response, nil)
+		expectedCommands := []string{"/rawdev/xstats,1", "/rawdev/xstats,123"}
+
+		dpdk.DeviceTypes = []string{"rawdev"}
+		dpdk.rawdevCommands = []string{"/rawdev/xstats"}
+		dpdk.AdditionalCommands = []string{}
+		commands := dpdk.gatherCommands(mockAcc)
+
+		require.ElementsMatch(t, commands, expectedCommands)
+		require.Equal(t, 0, len(mockAcc.Errors))
+	})
+
+	t.Run("when 2 ethdev commands are enabled but one command is disabled, then numberOfIds new commands should be appended", func(t *testing.T) {
+		mockConn, dpdk, mockAcc := prepareEnvironment()
+		defer mockConn.AssertExpectations(t)
+		response := fmt.Sprintf(`{"%s": [1, 123]}`, ethdevListCommand)
+		simulateResponse(mockConn, response, nil)
+		expectedCommands := []string{"/ethdev/stats,1", "/ethdev/stats,123"}
+
+		dpdk.DeviceTypes = []string{"ethdev"}
+		dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
+		dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{"/ethdev/xstats"})
+		dpdk.AdditionalCommands = []string{}
+		commands := dpdk.gatherCommands(mockAcc)
+
+		require.ElementsMatch(t, commands, expectedCommands)
+		require.Equal(t, 0, len(mockAcc.Errors))
+	})
+
+	t.Run("when ethdev commands are enabled but params fetching command returns error then error should be logged in accumulator", func(t *testing.T) {
+		mockConn, dpdk, mockAcc := prepareEnvironment()
+		defer mockConn.AssertExpectations(t)
+		simulateResponse(mockConn, `{notAJson}`, fmt.Errorf("some error"))
+
+		dpdk.DeviceTypes = []string{"ethdev"}
+		dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
+		dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{})
+		dpdk.AdditionalCommands = []string{}
+		commands := dpdk.gatherCommands(mockAcc)
+
+		require.Equal(t, 0, len(commands))
+		require.Equal(t, 1, len(mockAcc.Errors))
+	})
+}
+
+func Test_Gather(t *testing.T) {
+	t.Run("When parsing a plain json without nested object, then its key should be equal to \"\"", func(t *testing.T) {
+		mockConn, dpdk, mockAcc := prepareEnvironment()
+		defer mockConn.AssertExpectations(t)
+		dpdk.AdditionalCommands = []string{"/endpoint1"}
+		simulateResponse(mockConn, `{"/endpoint1":"myvalue"}`, nil)
+
+		err := dpdk.Gather(mockAcc)
+
+		require.NoError(t, err)
+		require.Equal(t, 0, len(mockAcc.Errors))
+
+		expected := []telegraf.Metric{
testutil.MustMetric(
+				"dpdk",
+				map[string]string{
+					"command": "/endpoint1",
+					"params":  "",
+				},
+				map[string]interface{}{
+					"": "myvalue",
+				},
+				time.Unix(0, 0),
+			),
+		}
+
+		actual := mockAcc.GetTelegrafMetrics()
+		testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+	})
+
+	t.Run("When parsing a list of values in a nested object then the list should be flattened", func(t *testing.T) {
+		mockConn, dpdk, mockAcc := prepareEnvironment()
+		defer mockConn.AssertExpectations(t)
+		dpdk.AdditionalCommands = []string{"/endpoint1"}
+		simulateResponse(mockConn, `{"/endpoint1":{"myvalue":[0,1,123]}}`, nil)
+
+		err := dpdk.Gather(mockAcc)
+		require.NoError(t, err)
+		require.Equal(t, 0, len(mockAcc.Errors))
+
+		expected := []telegraf.Metric{
+			testutil.MustMetric(
+				"dpdk",
+				map[string]string{
+					"command": "/endpoint1",
+					"params":  "",
+				},
+				map[string]interface{}{
+					"myvalue_0": float64(0),
+					"myvalue_1": float64(1),
+					"myvalue_2": float64(123),
+				},
+				time.Unix(0, 0),
+			),
+		}
+
+		actual := mockAcc.GetTelegrafMetrics()
+		testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+	})
+}
+
+func simulateResponse(mockConn *mocks.Conn, response string, readErr error) {
+	mockConn.On("Write", mock.Anything).Return(0, nil)
+	mockConn.On("Read", mock.Anything).Run(func(arg mock.Arguments) {
+		elem := arg.Get(0).([]byte)
+		copy(elem, response)
+	}).Return(len(response), readErr)
+	mockConn.On("SetDeadline", mock.Anything).Return(nil)
+
+	if readErr != nil {
+		mockConn.On("Close").Return(nil)
+	}
+}
+
+func simulateSocketResponse(socket net.Listener, t *testing.T) {
+	conn, err := socket.Accept()
+	require.NoError(t, err)
+
+	initMessage, err := json.Marshal(initMessage{MaxOutputLen: 1})
+	require.NoError(t, err)
+
+	_, err = conn.Write(initMessage)
+	require.NoError(t, err)
+}
diff --git a/plugins/inputs/dpdk/dpdk_utils.go b/plugins/inputs/dpdk/dpdk_utils.go
new file mode 100644
index 0000000000000..b7049d8365597
--- /dev/null
+++ b/plugins/inputs/dpdk/dpdk_utils.go
@@ -0,0 +1,117 @@
+//go:build linux
+// +build linux
+
+package dpdk
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/influxdata/telegraf/filter"
+)
+
+func commandWithParams(command string, params string) string {
+	if params != "" {
+		return command + "," + params
+	}
+	return command
+}
+
+func stripParams(command string) string {
+	index := strings.IndexRune(command, ',')
+	if index == -1 {
+		return command
+	}
+	return command[:index]
+}
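A minimal standalone sketch (not part of the patch) of the split rule these helpers implement: everything before the first comma is the command, and everything after it is returned as the single params string.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example command taken from the tests in this patch.
	cmd := "/ethdev/xstats,1"
	if i := strings.IndexRune(cmd, ','); i != -1 {
		fmt.Println(cmd[:i])   // "/ethdev/xstats", what stripParams returns
		fmt.Println(cmd[i+1:]) // "1", what getParams returns
	}
}
```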
+// Since DPDK is an open-source project, developers can use their own format of params,
+// so it could be "/command,1,3,5,123" or "/command,userId=1, count=1234".
+// To avoid issues with different formats of params, all params are returned as a single string.
+func getParams(command string) string {
+	index := strings.IndexRune(command, ',')
+	if index == -1 {
+		return ""
+	}
+	return command[index+1:]
+}
+
+// isSocket checks whether the provided path points to a socket file.
+func isSocket(path string) error {
+	pathInfo, err := os.Lstat(path)
+	if os.IsNotExist(err) {
+		return fmt.Errorf("provided path does not exist: '%v'", path)
+	}
+
+	if err != nil {
+		return fmt.Errorf("cannot get system information of '%v' file: %v", path, err)
+	}
+
+	if pathInfo.Mode()&os.ModeSocket != os.ModeSocket {
+		return fmt.Errorf("provided path does not point to a socket file: '%v'", path)
+	}
+
+	return nil
+}
+
+// jsonToArray converts a JSON array of device identifiers from a DPDK response to a string slice.
+func jsonToArray(input []byte, command string) ([]string, error) {
+	if len(input) == 0 {
+		return nil, fmt.Errorf("got empty object instead of json")
+	}
+
+	var rawMessage map[string]json.RawMessage
+	err := json.Unmarshal(input, &rawMessage)
+	if err != nil {
+		return nil, err
+	}
+
+	var intArray []int64
+	var stringArray []string
+	err = json.Unmarshal(rawMessage[command], &intArray)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal json response - %v", err)
+	}
+
+	for _, value := range intArray {
+		stringArray = append(stringArray, strconv.FormatInt(value, 10))
+	}
+
+	return stringArray, nil
+}
+
+func removeSubset(elements []string, excludedFilter filter.Filter) []string {
+	if excludedFilter == nil {
+		return elements
+	}
+
+	var result []string
+	for _, element := range elements {
+		if !excludedFilter.Match(element) {
+			result = append(result, element)
+		}
+	}
+
+	return result
+}
+
+func uniqueValues(values []string) []string {
+	in := make(map[string]bool)
+	result := make([]string, 0, len(values))
+
+	for _, value := range values {
+		if !in[value] {
+			in[value] = true
+			result = append(result, value)
+		}
+	}
+	return result
+}
+
+func isEmpty(value interface{}) bool {
+	return value == nil || (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil())
+}
diff --git a/plugins/inputs/dpdk/dpdk_utils_test.go b/plugins/inputs/dpdk/dpdk_utils_test.go
new file mode 100644
index 0000000000000..87e8a6c8248c3
--- /dev/null
+++ b/plugins/inputs/dpdk/dpdk_utils_test.go
@@ -0,0 +1,139 @@
+//go:build linux
+// +build linux
+
+package dpdk
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func Test_isSocket(t *testing.T) {
+	t.Run("when path points to non-existing file then error should be returned", func(t *testing.T) {
+		err := isSocket("/tmp/file-that-doesnt-exists")
+
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "provided path does not exist")
+	})
+
+	t.Run("should pass if path points to socket", func(t *testing.T) {
+		pathToSocket, socket := createSocketForTest(t)
+		defer socket.Close()
+
+		err := isSocket(pathToSocket)
+
+		require.NoError(t, err)
+	})
+
+	t.Run("if path points to regular file instead of socket then error should be returned", func(t *testing.T) {
+		pathToFile := "/tmp/dpdk-text-file.txt"
+		file, err := os.Create(pathToFile)
+		require.NoError(t, err)
+		defer file.Close()
+		defer os.Remove(pathToFile)
+
+		err = isSocket(pathToFile)
+
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "provided path does not point to a socket file")
+	})
+}
+
+func Test_stripParams(t *testing.T) {
+	command := "/mycommand"
+	params := "myParams"
+	t.Run("when passed string without params then passed
string should be returned", func(t *testing.T) { + strippedCommand := stripParams(command) + + require.Equal(t, command, strippedCommand) + }) + + t.Run("when passed string with params then string without params should be returned", func(t *testing.T) { + strippedCommand := stripParams(commandWithParams(command, params)) + + require.Equal(t, command, strippedCommand) + }) +} + +func Test_commandWithParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string with params then command with comma should be returned", func(t *testing.T) { + commandWithParams := commandWithParams(command, params) + + require.Equal(t, command+","+params, commandWithParams) + }) + + t.Run("when passed command with no params then command should be returned", func(t *testing.T) { + commandWithParams := commandWithParams(command, "") + + require.Equal(t, command, commandWithParams) + }) +} + +func Test_getParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string with params then command with comma should be returned", func(t *testing.T) { + commandParams := getParams(commandWithParams(command, params)) + + require.Equal(t, params, commandParams) + }) + + t.Run("when passed command with no params then empty string (representing empty params) should be returned", func(t *testing.T) { + commandParams := getParams(commandWithParams(command, "")) + + require.Equal(t, "", commandParams) + }) +} + +func Test_jsonToArray(t *testing.T) { + key := "/ethdev/list" + t.Run("when got numeric array then string array should be returned", func(t *testing.T) { + firstValue := int64(0) + secondValue := int64(1) + jsonString := fmt.Sprintf(`{"%s": [%d, %d]}`, key, firstValue, secondValue) + + arr, err := jsonToArray([]byte(jsonString), key) + + require.NoError(t, err) + require.Equal(t, strconv.FormatInt(firstValue, 10), arr[0]) + require.Equal(t, strconv.FormatInt(secondValue, 10), arr[1]) + }) + + t.Run("if non-json string is supplied as input then error should be returned", func(t *testing.T) { + _, err := jsonToArray([]byte("{notAJson}"), key) + + require.Error(t, err) + }) + + t.Run("when empty string is supplied as input then error should be returned", func(t *testing.T) { + jsonString := "" + + _, err := jsonToArray([]byte(jsonString), key) + + require.Error(t, err) + require.Contains(t, err.Error(), "got empty object instead of json") + }) + + t.Run("when valid json with json-object is supplied as input then error should be returned", func(t *testing.T) { + jsonString := fmt.Sprintf(`{"%s": {"testKey": "testValue"}}`, key) + + _, err := jsonToArray([]byte(jsonString), key) + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to unmarshall json response") + }) +} + +func createSocketForTest(t *testing.T) (string, net.Listener) { + pathToSocket := "/tmp/dpdk-test-socket" + socket, err := net.Listen("unixpacket", pathToSocket) + require.NoError(t, err) + return pathToSocket, socket +} diff --git a/plugins/inputs/dpdk/mocks/conn.go b/plugins/inputs/dpdk/mocks/conn.go new file mode 100644 index 0000000000000..58961039dce86 --- /dev/null +++ b/plugins/inputs/dpdk/mocks/conn.go @@ -0,0 +1,146 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. 
+ +package mocks + +import ( + net "net" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Conn is an autogenerated mock type for the Conn type +type Conn struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Conn) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// LocalAddr provides a mock function with given fields: +func (_m *Conn) LocalAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// Read provides a mock function with given fields: b +func (_m *Conn) Read(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoteAddr provides a mock function with given fields: +func (_m *Conn) RemoteAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// SetDeadline provides a mock function with given fields: t +func (_m *Conn) SetDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetReadDeadline provides a mock function with given fields: t +func (_m *Conn) SetReadDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetWriteDeadline provides a mock function with given fields: t +func (_m *Conn) SetWriteDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: b +func (_m *Conn) Write(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index 9e3188eec30bf..0bf8b983cd219 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -87,7 +87,6 @@ present in the metadata/stats endpoints. - id - name - fields: - - revision (string) - desired_status (string) - known_status (string) - limit_cpu (float) @@ -226,7 +225,7 @@ present in the metadata/stats endpoints. 
### Example Output ``` -ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 +ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000 diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index d7ce10cb2a2e0..b5521c5ea3f3a 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -3,7 +3,6 @@ package ecs import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "time" @@ -113,7 +112,7 @@ func (c *EcsClient) Task() (*Task, error) { if resp.StatusCode != http.StatusOK { // 
ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) } @@ -137,7 +136,7 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) } @@ -152,7 +151,6 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { // PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned. // If either errors, a single error is returned. func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) { - var task *Task var stats map[string]types.StatsJSON var err error diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 333aec80c2709..7e9d7e393346f 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -3,7 +3,7 @@ package ecs import ( "bytes" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -27,7 +27,6 @@ func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) { } func TestEcsClient_PollSync(t *testing.T) { - tests := []struct { name string mock *pollMock @@ -109,7 +108,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -130,7 +129,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -142,7 +141,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -180,7 +179,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -202,7 +201,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -215,7 +214,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index 5fa53d4fd58bc..f044e8d2cb7fe 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -6,15 +6,15 @@ import ( "time" "github.com/influxdata/telegraf" + 
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) // Ecs config object type Ecs struct { EndpointURL string `toml:"endpoint_url"` - Timeout internal.Duration + Timeout config.Duration ContainerNameInclude []string `toml:"container_name_include"` ContainerNameExclude []string `toml:"container_name_exclude"` @@ -114,7 +114,7 @@ func initSetup(ecs *Ecs) error { if ecs.client == nil { resolveEndpoint(ecs) - c, err := ecs.newClient(ecs.Timeout.Duration, ecs.EndpointURL, ecs.metadataVersion) + c, err := ecs.newClient(time.Duration(ecs.Timeout), ecs.EndpointURL, ecs.metadataVersion) if err != nil { return err } @@ -166,7 +166,6 @@ func resolveEndpoint(ecs *Ecs) { func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumulator) { taskFields := map[string]interface{}{ - "revision": task.Revision, "desired_status": task.DesiredStatus, "known_status": task.KnownStatus, "limit_cpu": task.Limits["CPU"], @@ -221,20 +220,20 @@ func mergeTags(a map[string]string, b map[string]string) map[string]string { } func (ecs *Ecs) createContainerNameFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) + containerNameFilter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) if err != nil { return err } - ecs.containerNameFilter = filter + ecs.containerNameFilter = containerNameFilter return nil } func (ecs *Ecs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) if err != nil { return err } - ecs.labelFilter = filter + ecs.labelFilter = labelFilter return nil } @@ -251,11 +250,11 @@ func (ecs *Ecs) createContainerStatusFilters() error { ecs.ContainerStatusExclude[i] = strings.ToUpper(exclude) } - filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) + statusFilter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) if err != nil { return err } - ecs.statusFilter = filter + ecs.statusFilter = statusFilter return nil } @@ -263,7 +262,7 @@ func init() { inputs.Add("ecs", func() telegraf.Input { return &Ecs{ EndpointURL: "", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), newClient: NewClient, filtersCreated: false, } diff --git a/plugins/inputs/ecs/ecs_test.go b/plugins/inputs/ecs/ecs_test.go index 5d64fef01efad..5a837d1ae4517 100644 --- a/plugins/inputs/ecs/ecs_test.go +++ b/plugins/inputs/ecs/ecs_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // codified golden objects for tests @@ -800,10 +800,10 @@ func TestResolveEndpoint(t *testing.T) { { name: "Endpoint is not set, ECS_CONTAINER_METADATA_URI is set => use v3 metadata", preF: func() { - os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local") + require.NoError(t, os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local")) }, afterF: func() { - os.Unsetenv("ECS_CONTAINER_METADATA_URI") + require.NoError(t, os.Unsetenv("ECS_CONTAINER_METADATA_URI")) }, given: Ecs{ EndpointURL: "", @@ -825,7 +825,7 @@ func TestResolveEndpoint(t *testing.T) { act := tt.given resolveEndpoint(&act) - assert.Equal(t, tt.exp, act) + 
require.Equal(t, tt.exp, act)
 		})
 	}
 }
diff --git a/plugins/inputs/ecs/stats.go b/plugins/inputs/ecs/stats.go
index d2a8ee5d34cfd..13d9aa3bc5326 100644
--- a/plugins/inputs/ecs/stats.go
+++ b/plugins/inputs/ecs/stats.go
@@ -284,7 +284,6 @@ func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
 		} else {
 			totalStatMap[field] = uintV
 		}
-
 	}
 }
diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md
index 54285c3b9e8e3..d3be315117019 100644
--- a/plugins/inputs/elasticsearch/README.md
+++ b/plugins/inputs/elasticsearch/README.md
@@ -12,6 +12,7 @@ In addition, the following optional queries are only made by the master node:
 [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
 
 Specific Elasticsearch endpoints that are queried:
+
 - Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting
 - Cluster Health: /_cluster/health?level=indices
 - Cluster Stats: /_cluster/stats
@@ -20,7 +21,7 @@ Specific Elasticsearch endpoints that are queried:
 
 Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level.
 
-### Configuration
+## Configuration
 
 ```toml
 [[inputs.elasticsearch]]
   ## specify a list of one or more Elasticsearch servers
   ## you can add username and password to your url to use basic authentication:
   ## servers = ["http://user:pass@localhost:9200"]
   servers = ["http://localhost:9200"]
 
+  ## HTTP headers to send with each request
+  http_headers = { "X-Custom-Header" = "Custom" }
+
   ## Timeout for HTTP requests to the elastic search server(s)
   http_timeout = "5s"
@@ -53,6 +57,7 @@ Note that specific statistics information can change between Elasticsearch versi
   cluster_stats_only_from_master = true
 
   ## Indices to collect; can be one or more indices names or _all
+  ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
   indices_include = ["_all"]
 
   ## One of "shards", "cluster", "indices"
@@ -74,6 +79,12 @@ Note that specific statistics information can change between Elasticsearch versi
   # tls_key = "/etc/telegraf/key.pem"
   ## Use TLS but skip chain & host verification
   # insecure_skip_verify = false
+
+  ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+  ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and
+  ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most
+  ## recent indices.
+  # num_most_recent_indices = 0
 ```
 
 ### Metrics
@@ -164,7 +173,7 @@ Emitted when `cluster_stats = true`:
     - shards_total (float)
     - store_size_in_bytes (float)
 
-+ elasticsearch_clusterstats_nodes
+- elasticsearch_clusterstats_nodes
   - tags:
     - cluster_name
     - node_name
@@ -225,7 +234,7 @@ Emitted when the appropriate `node_stats` options are set.
     - tx_count (float)
     - tx_size_in_bytes (float)
 
-+ elasticsearch_breakers
+- elasticsearch_breakers
   - tags:
     - cluster_name
     - node_attribute_ml.enabled
@@ -286,7 +295,7 @@ Emitted when the appropriate `node_stats` options are set.
- total_free_in_bytes (float) - total_total_in_bytes (float) -+ elasticsearch_http +- elasticsearch_http - tags: - cluster_name - node_attribute_ml.enabled @@ -397,7 +406,7 @@ Emitted when the appropriate `node_stats` options are set. - warmer_total (float) - warmer_total_time_in_millis (float) -+ elasticsearch_jvm +- elasticsearch_jvm - tags: - cluster_name - node_attribute_ml.enabled @@ -475,7 +484,7 @@ Emitted when the appropriate `node_stats` options are set. - swap_used_in_bytes (float) - timestamp (float) -+ elasticsearch_process +- elasticsearch_process - tags: - cluster_name - node_attribute_ml.enabled diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index b6dfd2a81b11f..33b9f93cd55be 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -3,7 +3,7 @@ package elasticsearch import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "sort" @@ -12,7 +12,8 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" @@ -91,6 +92,9 @@ const sampleConfig = ` # servers = ["http://user:pass@localhost:9200"] servers = ["http://localhost:9200"] + ## HTTP headers to send with each request + http_headers = { "X-Custom-Header" = "Custom" } + ## Timeout for HTTP requests to the elastic search server(s) http_timeout = "5s" @@ -115,6 +119,7 @@ const sampleConfig = ` cluster_stats_only_from_master = true ## Indices to collect; can be one or more indices names or _all + ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. indices_include = ["_all"] ## One of "shards", "cluster", "indices" @@ -135,6 +140,11 @@ const sampleConfig = ` # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. + ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them + ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. 
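For illustration, the option documented in the sample config comment above can also be exercised from Go the same way `TestGatherDateStampedIndicesStats` does later in this diff. A minimal sketch, not part of the patch; the helper name and server address are assumed:

```go
package elasticsearch

// exampleMostRecentIndices is a hypothetical helper showing the option
// described above; it mirrors TestGatherDateStampedIndicesStats below.
func exampleMostRecentIndices() (*Elasticsearch, error) {
	es := NewElasticsearch()
	es.Servers = []string{"http://localhost:9200"} // assumed address
	es.IndicesInclude = []string{"twitter*", "influx*", "penguins"}
	es.NumMostRecentIndices = 2 // keep the two most recent per pattern
	// Init compiles each indices_include entry into a glob matcher.
	if err := es.Init(); err != nil {
		return nil, err
	}
	return es, nil
}
```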
+  # num_most_recent_indices = 0
 `
 
 // Elasticsearch is a plugin to read stats from one or many Elasticsearch
@@ -142,7 +152,8 @@ const sampleConfig = `
 type Elasticsearch struct {
 	Local                      bool              `toml:"local"`
 	Servers                    []string          `toml:"servers"`
-	HTTPTimeout                internal.Duration `toml:"http_timeout"`
+	HTTPHeaders                map[string]string `toml:"http_headers"`
+	HTTPTimeout                config.Duration   `toml:"http_timeout"`
 	ClusterHealth              bool              `toml:"cluster_health"`
 	ClusterHealthLevel         string            `toml:"cluster_health_level"`
 	ClusterStats               bool              `toml:"cluster_stats"`
@@ -152,11 +163,14 @@ type Elasticsearch struct {
 	NodeStats                  []string          `toml:"node_stats"`
 	Username                   string            `toml:"username"`
 	Password                   string            `toml:"password"`
+	NumMostRecentIndices       int               `toml:"num_most_recent_indices"`
+
 	tls.ClientConfig
 
 	client          *http.Client
 	serverInfo      map[string]serverInfo
 	serverInfoMutex sync.Mutex
+	indexMatchers   map[string]filter.Filter
 }
 type serverInfo struct {
 	nodeID   string
@@ -170,7 +184,7 @@ func (i serverInfo) isMaster() bool {
 
 // NewElasticsearch return a new instance of Elasticsearch
 func NewElasticsearch() *Elasticsearch {
 	return &Elasticsearch{
-		HTTPTimeout:                internal.Duration{Duration: time.Second * 5},
+		HTTPTimeout:                config.Duration(time.Second * 5),
 		ClusterStatsOnlyFromMaster: true,
 		ClusterHealthLevel:         "indices",
 	}
@@ -214,6 +228,19 @@ func (e *Elasticsearch) Description() string {
 	return "Read stats from one or more Elasticsearch servers or clusters"
 }
 
+// Init the plugin.
+func (e *Elasticsearch) Init() error {
+	// Compile the configured indices to match for sorting.
+	indexMatchers, err := e.compileIndexMatchers()
+	if err != nil {
+		return err
+	}
+
+	e.indexMatchers = indexMatchers
+
+	return nil
+}
+
 // Gather reads the stats from Elasticsearch and writes it to the
 // Accumulator.
 func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
@@ -254,7 +281,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 			e.serverInfoMutex.Lock()
 			e.serverInfo[s] = info
 			e.serverInfoMutex.Unlock()
-
 		}(serv, acc)
 	}
 	wgC.Wait()
@@ -318,12 +344,12 @@ func (e *Elasticsearch) createHTTPClient() (*http.Client, error) {
 		return nil, err
 	}
 	tr := &http.Transport{
-		ResponseHeaderTimeout: e.HTTPTimeout.Duration,
+		ResponseHeaderTimeout: time.Duration(e.HTTPTimeout),
 		TLSClientConfig:       tlsCfg,
 	}
 	client := &http.Client{
 		Transport: tr,
-		Timeout:   e.HTTPTimeout.Duration,
+		Timeout:   time.Duration(e.HTTPTimeout),
 	}
 
 	return client, nil
@@ -527,66 +553,131 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator)
 		acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now)
 	}
 
-	// Individual Indices stats
-	for id, index := range indicesStats.Indices {
-		indexTag := map[string]string{"index_name": id}
-		stats := map[string]interface{}{
-			"primaries": index.Primaries,
-			"total":     index.Total,
+	// Gather stats for each index.
+	err := e.gatherIndividualIndicesStats(indicesStats.Indices, now, acc)
+
+	return err
+}
+
+// gatherIndividualIndicesStats gathers stats for each individual index, limiting them to the most recent ones when configured.
+func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexStat, now time.Time, acc telegraf.Accumulator) error {
+	// Sort indices into buckets based on their configured prefix, if any matches.
+	categorizedIndexNames := e.categorizeIndices(indices)
+	for _, matchingIndices := range categorizedIndexNames {
+		// Establish the number of each category of indices to use. The user can configure to use only the latest 'X' amount.
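The selection logic that follows relies on a simple rule worth spelling out: because zero-padded, date-stamped suffixes sort lexically in chronological order, `sort.Strings` plus taking the last `num_most_recent_indices` entries selects the most recent indices. A standalone sketch, not part of the patch, using index names from the test data in this diff:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"twitter_2020_08_02", "twitter_2020_07_31", "twitter_2020_08_01"}
	sort.Strings(names)

	n := 2 // num_most_recent_indices
	if n > len(names) {
		n = len(names)
	}
	// Prints [twitter_2020_08_01 twitter_2020_08_02], the two most recent.
	fmt.Println(names[len(names)-n:])
}
```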
+ indicesCount := len(matchingIndices) + indicesToTrackCount := indicesCount + + // Sort the indices if configured to do so. + if e.NumMostRecentIndices > 0 { + if e.NumMostRecentIndices < indicesToTrackCount { + indicesToTrackCount = e.NumMostRecentIndices + } + sort.Strings(matchingIndices) } - for m, s := range stats { - f := jsonparser.JSONFlattener{} - // parse Json, getting strings and bools - err := f.FullFlattenJSON("", s, true, true) + + // Gather only the number of indexes that have been configured, in descending order (most recent, if date-stamped). + for i := indicesCount - 1; i >= indicesCount-indicesToTrackCount; i-- { + indexName := matchingIndices[i] + + err := e.gatherSingleIndexStats(indexName, indices[indexName], now, acc) if err != nil { return err } - acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now) } + } - if e.IndicesLevel == "shards" { - for shardNumber, shards := range index.Shards { - for _, shard := range shards { + return nil +} - // Get Shard Stats - flattened := jsonparser.JSONFlattener{} - err := flattened.FullFlattenJSON("", shard, true, true) - if err != nil { - return err - } +func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string { + categorizedIndexNames := map[string][]string{} - // determine shard tag and primary/replica designation - shardType := "replica" - if flattened.Fields["routing_primary"] == true { - shardType = "primary" - } - delete(flattened.Fields, "routing_primary") + // If all indices are configured to be gathered, bucket them all together. + if len(e.IndicesInclude) == 0 || e.IndicesInclude[0] == "_all" { + for indexName := range indices { + categorizedIndexNames["_all"] = append(categorizedIndexNames["_all"], indexName) + } - routingState, ok := flattened.Fields["routing_state"].(string) - if ok { - flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) - } + return categorizedIndexNames + } - routingNode, _ := flattened.Fields["routing_node"].(string) - shardTags := map[string]string{ - "index_name": id, - "node_id": routingNode, - "shard_name": string(shardNumber), - "type": shardType, - } + // Bucket each returned index with its associated configured index (if any match). + for indexName := range indices { + match := indexName + for name, matcher := range e.indexMatchers { + // If a configured index matches one of the returned indexes, mark it as a match. + if matcher.Match(match) { + match = name + break + } + } - for key, field := range flattened.Fields { - switch field.(type) { - case string, bool: - delete(flattened.Fields, key) - } - } + // Bucket all matching indices together for sorting. 
+ categorizedIndexNames[match] = append(categorizedIndexNames[match], indexName) + } - acc.AddFields("elasticsearch_indices_stats_shards", - flattened.Fields, - shardTags, - now) + return categorizedIndexNames +} + +func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now time.Time, acc telegraf.Accumulator) error { + indexTag := map[string]string{"index_name": name} + stats := map[string]interface{}{ + "primaries": index.Primaries, + "total": index.Total, + } + for m, s := range stats { + f := jsonparser.JSONFlattener{} + // parse Json, getting strings and bools + err := f.FullFlattenJSON("", s, true, true) + if err != nil { + return err + } + acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now) + } + + if e.IndicesLevel == "shards" { + for shardNumber, shards := range index.Shards { + for _, shard := range shards { + // Get Shard Stats + flattened := jsonparser.JSONFlattener{} + err := flattened.FullFlattenJSON("", shard, true, true) + if err != nil { + return err + } + + // determine shard tag and primary/replica designation + shardType := "replica" + routingPrimary, _ := flattened.Fields["routing_primary"].(bool) + if routingPrimary { + shardType = "primary" + } + delete(flattened.Fields, "routing_primary") + + routingState, ok := flattened.Fields["routing_state"].(string) + if ok { + flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) + } + + routingNode, _ := flattened.Fields["routing_node"].(string) + shardTags := map[string]string{ + "index_name": name, + "node_id": routingNode, + "shard_name": shardNumber, + "type": shardType, } + + for key, field := range flattened.Fields { + switch field.(type) { + case string, bool: + delete(flattened.Fields, key) + } + } + + acc.AddFields("elasticsearch_indices_stats_shards", + flattened.Fields, + shardTags, + now) } } } @@ -604,6 +695,10 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { req.SetBasicAuth(e.Username, e.Password) } + for key, value := range e.HTTPHeaders { + req.Header.Add(key, value) + } + r, err := e.client.Do(req) if err != nil { return "", err @@ -615,7 +710,7 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { // future calls. return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } - response, err := ioutil.ReadAll(r.Body) + response, err := io.ReadAll(r.Body) if err != nil { return "", err @@ -636,6 +731,10 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { req.SetBasicAuth(e.Username, e.Password) } + for key, value := range e.HTTPHeaders { + req.Header.Add(key, value) + } + r, err := e.client.Do(req) if err != nil { return err @@ -649,11 +748,24 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { r.StatusCode, http.StatusOK) } - if err = json.NewDecoder(r.Body).Decode(v); err != nil { - return err + return json.NewDecoder(r.Body).Decode(v) +} + +func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) { + indexMatchers := map[string]filter.Filter{} + var err error + + // Compile each configured index into a glob matcher. 
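For context before the compile loop below: `filter.Compile` is the same glob helper the dpdk plugin uses earlier in this diff, and `categorizeIndices` above relies on its `Match` method to bucket returned index names under the configured pattern. A minimal sketch, not part of the patch, with names taken from the tests:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Compile a configured pattern into a glob matcher.
	matcher, err := filter.Compile([]string{"twitter*"})
	if err != nil {
		panic(err)
	}
	fmt.Println(matcher.Match("twitter_2020_08_01")) // true: bucketed under "twitter*"
	fmt.Println(matcher.Match("penguins"))           // false: kept under its own name
}
```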
+ for _, configuredIndex := range e.IndicesInclude { + if _, exists := indexMatchers[configuredIndex]; !exists { + indexMatchers[configuredIndex], err = filter.Compile([]string{configuredIndex}) + if err != nil { + return nil, err + } + } } - return nil + return indexMatchers, nil } func init() { diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index ad91c898a1a5c..1ed61e731ce1f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -1,17 +1,14 @@ package elasticsearch import ( - "io/ioutil" + "io" "net/http" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - - "fmt" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func defaultTags() map[string]string { @@ -33,9 +30,9 @@ type transportMock struct { body string } -func newTransportMock(statusCode int, body string) http.RoundTripper { +func newTransportMock(body string) http.RoundTripper { return &transportMock{ - statusCode: statusCode, + statusCode: http.StatusOK, body: body, } } @@ -47,19 +44,11 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } -func (t *transportMock) CancelRequest(_ *http.Request) { -} - -func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) { - if es.serverInfo[server].isMaster() != expected { - msg := fmt.Sprintf("IsMaster set incorrectly") - assert.Fail(t, msg) - } -} +func (t *transportMock) CancelRequest(_ *http.Request) {} func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { tags := defaultTags() @@ -77,16 +66,13 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { func TestGather(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := acc.GatherError(es.Gather); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, acc.GatherError(es.Gather)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -94,16 +80,13 @@ func TestGatherIndividualStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.NodeStats = []string{"jvm", "process"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess) + es.client.Transport = newTransportMock(nodeStatsResponseJVMProcess) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := acc.GatherError(es.Gather); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, acc.GatherError(es.Gather)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") tags := defaultTags() acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) @@ -120,16 +103,13 @@ 
func TestGatherIndividualStats(t *testing.T) { func TestGatherNodeStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, es.gatherNodeStats("junk", &acc)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -138,14 +118,13 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.client.Transport = newTransportMock(clusterHealthResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -165,14 +144,13 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "cluster" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.client.Transport = newTransportMock(clusterHealthResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -192,14 +170,13 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "indices" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices) + es.client.Transport = newTransportMock(clusterHealthResponseWithIndices) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -223,31 +200,25 @@ func TestGatherClusterStatsMaster(t *testing.T) { info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster - es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult) + es.client.Transport = newTransportMock(IsMasterResult) masterID, err := es.getCatMaster("junk") require.NoError(t, err) info.masterID = masterID es.serverInfo["http://example.com:9200"] = info - IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") - if masterID != 
IsMasterResultTokens[0] { - msg := fmt.Sprintf("catmaster is incorrect") - assert.Fail(t, msg) - } + isMasterResultTokens := strings.Split(IsMasterResult, " ") + require.Equal(t, masterID, isMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], true, t) + es.client.Transport = newTransportMock(nodeStatsResponse) + require.NoError(t, es.gatherNodeStats("junk", &acc)) + require.True(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) // now test the clusterstats method - es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse) + es.client.Transport = newTransportMock(clusterStatsResponse) require.NoError(t, es.gatherClusterStats("junk", &acc)) tags := map[string]string{ @@ -269,26 +240,21 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster - es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult) + es.client.Transport = newTransportMock(IsNotMasterResult) masterID, err := es.getCatMaster("junk") require.NoError(t, err) - IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") - if masterID != IsNotMasterResultTokens[0] { - msg := fmt.Sprintf("catmaster is incorrect") - assert.Fail(t, msg) - } + isNotMasterResultTokens := strings.Split(IsNotMasterResult, " ") + require.Equal(t, masterID, isNotMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } + es.client.Transport = newTransportMock(nodeStatsResponse) + require.NoError(t, es.gatherNodeStats("junk", &acc)) // ensure flag is clear so Cluster Stats would not be done - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -296,32 +262,69 @@ func TestGatherClusterIndicesStats(t *testing.T) { es := newElasticsearchWithClient() es.IndicesInclude = []string{"_all"} es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse) + es.client.Transport = newTransportMock(clusterIndicesResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherIndicesStats("junk", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherIndicesStats("junk", &acc)) acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", clusterIndicesExpected, map[string]string{"index_name": "twitter"}) } +func TestGatherDateStampedIndicesStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesInclude = []string{"twitter*", "influx*", "penguins"} + es.NumMostRecentIndices = 2 + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(dateStampedIndicesResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = 
defaultServerInfo() + require.NoError(t, es.Init()) + + var acc testutil.Accumulator + require.NoError(t, es.gatherIndicesStats(es.Servers[0]+"/"+strings.Join(es.IndicesInclude, ",")+"/_stats", &acc)) + + // includes 2 most recent indices for "twitter", only expect the most recent two. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_08_02"}) + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_08_01"}) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_07_31"}) + + // includes 2 most recent indices for "influx", only expect the most recent two. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2021.01.02"}) + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2021.01.01"}) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2020.12.31"}) + + // not configured to sort the 'penguins' index, but ensure it is also included. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "penguins"}) +} + func TestGatherClusterIndiceShardsStats(t *testing.T) { es := newElasticsearchWithClient() es.IndicesLevel = "shards" es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse) + es.client.Transport = newTransportMock(clusterIndicesShardsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherIndicesStats("junk", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherIndicesStats("junk", &acc)) acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", clusterIndicesExpected, diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index a04fe1521e999..1006e4848bb65 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -2089,6 +2089,2008 @@ const clusterIndicesResponse = ` } }` +const dateStampedIndicesResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + 
"suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, 
+ "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter_2020_08_02": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + 
"store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "twitter_2020_08_01": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + 
"current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + 
"term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "twitter_2020_07_31": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + 
"index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2021.01.02": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + 
"total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + 
"doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2020.12.31": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + 
"noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2021.01.01": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + 
"refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, 
+ "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "penguins": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, 
+ "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + } + } +}` + var clusterIndicesExpected = map[string]interface{}{ "completion_size_in_bytes": float64(0), "docs_count": float64(999), diff --git a/plugins/inputs/elasticsearch_query/README.md b/plugins/inputs/elasticsearch_query/README.md new file mode 100755 index 0000000000000..5e90d19e72f21 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/README.md @@ -0,0 +1,166 @@ +# Elasticsearch query input plugin + +This [elasticsearch](https://www.elastic.co/) query plugin queries endpoints to obtain metrics from data stored in an Elasticsearch cluster. + +The following is supported: + +- return number of hits for a search query +- calculate the avg/max/min/sum for a numeric field, filtered by a query, aggregated per tag +- count number of terms for a particular field + +## Elasticsearch support + +This plugins is tested against Elasticsearch 5.x and 6.x releases. +Currently it is known to break on 7.x or greater versions. + +## Configuration + +```toml +[[inputs.elasticsearch_query]] + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + + ## Elasticsearch client timeout, defaults to "5s". 
+  # timeout = "5s"
+
+  ## Set to true to ask Elasticsearch for a list of all cluster nodes;
+  ## when enabled it is not necessary to list all nodes in the urls option.
+  # enable_sniffer = false
+
+  ## Set the interval to check if the Elasticsearch nodes are available.
+  ## This option is only used if enable_sniffer is also set (0s to disable it).
+  # health_check_interval = "10s"
+
+  ## HTTP basic authentication details (e.g. when using X-Pack)
+  # username = "telegraf"
+  # password = "mypassword"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  [[inputs.elasticsearch_query.aggregation]]
+    ## measurement name for the results of the aggregation query
+    measurement_name = "measurement"
+
+    ## Elasticsearch indexes to query (wildcards are accepted).
+    index = "index-*"
+
+    ## The date/time field in the Elasticsearch index (mandatory).
+    date_field = "@timestamp"
+
+    ## If the date/time field in Elasticsearch uses a custom date/time format,
+    ## it may be required to provide the format so the field is parsed correctly.
+    ##
+    ## If using one of the built-in Elasticsearch formats, this is not required.
+    # date_field_custom_format = ""
+
+    ## Time window to query (e.g. "1m" to query documents from the last minute).
+    ## Normally this should be set to the same value as the collection interval.
+    query_period = "1m"
+
+    ## Lucene query to filter results
+    # filter_query = "*"
+
+    ## Fields to aggregate values from (must be numeric fields)
+    # metric_fields = ["metric"]
+
+    ## Aggregation function to use on the metric fields.
+    ## Must be set if 'metric_fields' is set.
+    ## Valid values are: avg, sum, min, max
+    # metric_function = "avg"
+
+    ## Fields to be used as tags.
+    ## Must be text, non-analyzed fields. Metric aggregations are performed per tag.
+    # tags = ["field.keyword", "field2.keyword"]
+
+    ## Set to true to include documents even when the tag(s) above are missing.
+    # include_missing_tag = false
+
+    ## String value of the tag when the tag does not exist.
+    ## Used when include_missing_tag is true.
+    # missing_tag_value = "null"
+```
+
+## Examples
+
+Please note that the `[[inputs.elasticsearch_query]]` section is still required for all of the examples below.
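+
+For instance, a complete minimal configuration that only counts matching documents could look like the following sketch; with no `metric_fields` or `tags` configured, the plugin emits just a `doc_count` field. The measurement and index names here are illustrative:
+
+```toml
+[[inputs.elasticsearch_query]]
+  urls = [ "http://node1.es.example.com:9200" ]
+
+  [[inputs.elasticsearch_query.aggregation]]
+    measurement_name = "doc_counts"   # illustrative measurement name
+    index = "my-index-*"              # illustrative index pattern
+    date_field = "@timestamp"
+    query_period = "1m"
+```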
+
+### Search the average response time, per URI and per response status code
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "my-index-*"
+  filter_query = "*"
+  metric_fields = ["response_time"]
+  metric_function = "avg"
+  tags = ["URI.keyword", "response.keyword"]
+  include_missing_tag = true
+  missing_tag_value = "null"
+  date_field = "@timestamp"
+  query_period = "1m"
+```
+
+### Search the maximum response time per method and per URI
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "my-index-*"
+  filter_query = "*"
+  metric_fields = ["response_time"]
+  metric_function = "max"
+  tags = ["method.keyword","URI.keyword"]
+  include_missing_tag = false
+  missing_tag_value = "null"
+  date_field = "@timestamp"
+  query_period = "1m"
+```
+
+### Search the number of documents matching a filter query in all indices
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "*"
+  filter_query = "product_1 AND HEAD"
+  query_period = "1m"
+  date_field = "@timestamp"
+```
+
+### Search the number of documents matching a filter query, returning per response status code
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "*"
+  filter_query = "downloads"
+  tags = ["response.keyword"]
+  include_missing_tag = false
+  date_field = "@timestamp"
+  query_period = "1m"
+```
+
+### Required parameters
+
+- `measurement_name`: The target measurement in which the results of the aggregation query are stored.
+- `index`: The index name to query on Elasticsearch (wildcards are accepted).
+- `query_period`: The time window to query (e.g. "1m" to query documents from the last minute). Normally this should be set to the same value as the collection interval.
+- `date_field`: The date/time field in the Elasticsearch index.
+
+### Optional parameters
+
+- `date_field_custom_format`: Not needed if using one of the built-in date/time formats of Elasticsearch, but may be required if using a custom date/time format. The format syntax uses the [Joda date format](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/search-aggregations-bucket-daterange-aggregation.html#date-format-pattern).
+- `filter_query`: Lucene query to filter the results (default: "\*").
+- `metric_fields`: The list of fields on which to perform the metric aggregation (these must be indexed as numeric fields).
+- `metric_function`: The single-value metric aggregation function to be performed on the `metric_fields` defined. Currently supported aggregations are "avg", "min", "max", "sum" (see [https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html)).
+- `tags`: The list of fields to be used as tags (these must be indexed as non-analyzed fields). A terms aggregation is performed per tag defined.
+- `include_missing_tag`: Set to true to include documents where the tag(s) specified above do not exist. (If false, documents without the specified tag field will be ignored in `doc_count` and in the metric aggregation.)
+- `missing_tag_value`: The value of the tag that will be set for documents in which the tag field does not exist. Only used when `include_missing_tag` is set to `true`.
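+
+As a sketch of `date_field_custom_format` in use, the following aggregation assumes a hypothetical index whose `ts` field stores timestamps in a custom Joda pattern such as `2021-01-01 12:00:00`; all names and values here are illustrative:
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "transactions"   # hypothetical measurement name
+  index = "transactions-*"            # hypothetical index pattern
+  date_field = "ts"                   # hypothetical custom timestamp field
+  date_field_custom_format = "yyyy-MM-dd HH:mm:ss"
+  query_period = "1m"
+  metric_fields = ["amount"]          # hypothetical numeric field
+  metric_function = "sum"
+```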
diff --git a/plugins/inputs/elasticsearch_query/aggregation_parser.go b/plugins/inputs/elasticsearch_query/aggregation_parser.go new file mode 100644 index 0000000000000..c4dff05ee6fee --- /dev/null +++ b/plugins/inputs/elasticsearch_query/aggregation_parser.go @@ -0,0 +1,153 @@ +package elasticsearch_query + +import ( + "fmt" + + "github.com/influxdata/telegraf" + elastic5 "gopkg.in/olivere/elastic.v5" +) + +type resultMetric struct { + name string + fields map[string]interface{} + tags map[string]string +} + +func parseSimpleResult(acc telegraf.Accumulator, measurement string, searchResult *elastic5.SearchResult) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["doc_count"] = searchResult.Hits.TotalHits + + acc.AddFields(measurement, fields, tags) +} + +func parseAggregationResult(acc telegraf.Accumulator, aggregationQueryList []aggregationQueryData, searchResult *elastic5.SearchResult) error { + measurements := map[string]map[string]string{} + + // organize the aggregation query data by measurement + for _, aggregationQuery := range aggregationQueryList { + if measurements[aggregationQuery.measurement] == nil { + measurements[aggregationQuery.measurement] = map[string]string{ + aggregationQuery.name: aggregationQuery.function, + } + } else { + t := measurements[aggregationQuery.measurement] + t[aggregationQuery.name] = aggregationQuery.function + measurements[aggregationQuery.measurement] = t + } + } + + // recurse over query aggregation results per measurement + for measurement, aggNameFunction := range measurements { + var m resultMetric + + m.fields = make(map[string]interface{}) + m.tags = make(map[string]string) + m.name = measurement + + _, err := recurseResponse(acc, aggNameFunction, searchResult.Aggregations, m) + if err != nil { + return err + } + } + return nil +} + +func recurseResponse(acc telegraf.Accumulator, aggNameFunction map[string]string, bucketResponse elastic5.Aggregations, m resultMetric) (resultMetric, error) { + var err error + + aggNames := getAggNames(bucketResponse) + if len(aggNames) == 0 { + // we've reached a single bucket or response without aggregation, nothing here + return m, nil + } + + // metrics aggregations response can contain multiple field values, so we iterate over them + for _, aggName := range aggNames { + aggFunction, found := aggNameFunction[aggName] + if !found { + return m, fmt.Errorf("child aggregation function '%s' not found %v", aggName, aggNameFunction) + } + + resp := getResponseAggregation(aggFunction, aggName, bucketResponse) + if resp == nil { + return m, fmt.Errorf("child aggregation '%s' not found", aggName) + } + + switch resp := resp.(type) { + case *elastic5.AggregationBucketKeyItems: + // we've found a terms aggregation, iterate over the buckets and try to retrieve the inner aggregation values + for _, bucket := range resp.Buckets { + var s string + var ok bool + m.fields["doc_count"] = bucket.DocCount + if s, ok = bucket.Key.(string); !ok { + return m, fmt.Errorf("bucket key is not a string (%s, %s)", aggName, aggFunction) + } + m.tags[aggName] = s + + // we need to recurse down through the buckets, as it may contain another terms aggregation + m, err = recurseResponse(acc, aggNameFunction, bucket.Aggregations, m) + if err != nil { + return m, err + } + + // if there are fields present after finishing the bucket, it is a complete metric + // store it and clean the fields to start a new metric + if len(m.fields) > 0 { + acc.AddFields(m.name, m.fields, m.tags) + m.fields = 
make(map[string]interface{}) + } + + // after finishing the bucket, remove its tag from the tags map + delete(m.tags, aggName) + } + + case *elastic5.AggregationValueMetric: + if resp.Value != nil { + m.fields[aggName] = *resp.Value + } else { + m.fields[aggName] = float64(0) + } + + default: + return m, fmt.Errorf("aggregation type %T not supported", resp) + } + } + + // if there are fields here it comes from a metrics aggregation without a parent terms aggregation + if len(m.fields) > 0 { + acc.AddFields(m.name, m.fields, m.tags) + m.fields = make(map[string]interface{}) + } + return m, nil +} + +func getResponseAggregation(function string, aggName string, aggs elastic5.Aggregations) (agg interface{}) { + switch function { + case "avg": + agg, _ = aggs.Avg(aggName) + case "sum": + agg, _ = aggs.Sum(aggName) + case "min": + agg, _ = aggs.Min(aggName) + case "max": + agg, _ = aggs.Max(aggName) + case "terms": + agg, _ = aggs.Terms(aggName) + } + + return agg +} + +// getAggNames returns the aggregation names from a response aggregation +func getAggNames(agg elastic5.Aggregations) (aggs []string) { + for k := range agg { + if (k != "key") && (k != "doc_count") { + aggs = append(aggs, k) + } + } + + return aggs +} diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go new file mode 100644 index 0000000000000..105fa216206fc --- /dev/null +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -0,0 +1,206 @@ +package elasticsearch_query + +import ( + "context" + "fmt" + "strings" + "time" + + elastic5 "gopkg.in/olivere/elastic.v5" +) + +type aggKey struct { + measurement string + name string + function string + field string +} + +type aggregationQueryData struct { + aggKey + isParent bool + aggregation elastic5.Aggregation +} + +func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregation esAggregation) (*elastic5.SearchResult, error) { + now := time.Now().UTC() + from := now.Add(time.Duration(-aggregation.QueryPeriod)) + filterQuery := aggregation.FilterQuery + if filterQuery == "" { + filterQuery = "*" + } + + query := elastic5.NewBoolQuery() + query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) + query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now).Format(aggregation.DateFieldFormat)) + + search := e.esClient.Search().Index(aggregation.Index).Query(query).Size(0) + + // add only parent elastic.Aggregations to the search request, all the rest are subaggregations of these + for _, v := range aggregation.aggregationQueryList { + if v.isParent && v.aggregation != nil { + search.Aggregation(v.aggKey.name, v.aggregation) + } + } + + searchResult, err := search.Do(ctx) + if err != nil && searchResult != nil { + return searchResult, fmt.Errorf("%s - %s", searchResult.Error.Type, searchResult.Error.Reason) + } + + return searchResult, err +} + +// getMetricFields function returns a map of fields and field types on Elasticsearch that matches field.MetricFields +func (e *ElasticsearchQuery) getMetricFields(ctx context.Context, aggregation esAggregation) (map[string]string, error) { + mapMetricFields := make(map[string]string) + + for _, metricField := range aggregation.MetricFields { + resp, err := e.esClient.GetFieldMapping().Index(aggregation.Index).Field(metricField).Do(ctx) + if err != nil { + return mapMetricFields, fmt.Errorf("error retrieving field mappings for %s: %s", aggregation.Index, err.Error()) + } + + for _, index := range resp { + var ok 
bool + var mappings interface{} + if mappings, ok = index.(map[string]interface{})["mappings"]; !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", index) + } + + var types map[string]interface{} + if types, ok = mappings.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", mappings) + } + + var fields map[string]interface{} + for _, _type := range types { + if fields, ok = _type.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _type) + } + + var field map[string]interface{} + for _, _field := range fields { + if field, ok = _field.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _field) + } + + fullname := field["full_name"] + mapping := field["mapping"] + + var fname string + if fname, ok = fullname.(string); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected string, got %T)", fullname) + } + + var fieldTypes map[string]interface{} + if fieldTypes, ok = mapping.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", mapping) + } + + var fieldType interface{} + for _, _fieldType := range fieldTypes { + if fieldType, ok = _fieldType.(map[string]interface{})["type"]; !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _fieldType) + } + + var ftype string + if ftype, ok = fieldType.(string); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected string, got %T)", fieldType) + } + mapMetricFields[fname] = ftype + } + } + } + } + } + + return mapMetricFields, nil +} + +func (aggregation *esAggregation) buildAggregationQuery() error { + // create one aggregation per metric field found & function defined for numeric fields + for k, v := range aggregation.mapMetricFields { + switch v { + case "long": + case "float": + case "integer": + case "short": + case "double": + case "scaled_float": + default: + continue + } + + agg, err := getFunctionAggregation(aggregation.MetricFunction, k) + if err != nil { + return err + } + + aggregationQuery := aggregationQueryData{ + aggKey: aggKey{ + measurement: aggregation.MeasurementName, + function: aggregation.MetricFunction, + field: k, + name: strings.Replace(k, ".", "_", -1) + "_" + aggregation.MetricFunction, + }, + isParent: true, + aggregation: agg, + } + + aggregation.aggregationQueryList = append(aggregation.aggregationQueryList, aggregationQuery) + } + + // create a terms aggregation per tag + for _, term := range aggregation.Tags { + agg := elastic5.NewTermsAggregation() + if aggregation.IncludeMissingTag && aggregation.MissingTagValue != "" { + agg.Missing(aggregation.MissingTagValue) + } + + agg.Field(term).Size(1000) + + // add each previous parent aggregations as subaggregations of this terms aggregation + for key, aggMap := range aggregation.aggregationQueryList { + if aggMap.isParent { + agg.Field(term).SubAggregation(aggMap.name, aggMap.aggregation).Size(1000) + // update subaggregation map with parent information + aggregation.aggregationQueryList[key].isParent = false + } + } + + aggregationQuery := aggregationQueryData{ + aggKey: aggKey{ + measurement: aggregation.MeasurementName, + function: "terms", + field: term, + name: strings.Replace(term, ".", "_", -1), + }, + isParent: true, + aggregation: 
+
+func getFunctionAggregation(function string, aggfield string) (elastic5.Aggregation, error) {
+	var agg elastic5.Aggregation
+
+	switch function {
+	case "avg":
+		agg = elastic5.NewAvgAggregation().Field(aggfield)
+	case "sum":
+		agg = elastic5.NewSumAggregation().Field(aggfield)
+	case "min":
+		agg = elastic5.NewMinAggregation().Field(aggfield)
+	case "max":
+		agg = elastic5.NewMaxAggregation().Field(aggfield)
+	default:
+		return nil, fmt.Errorf("aggregation function '%s' not supported", function)
+	}
+
+	return agg, nil
+}
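
For reference, a hypothetical same-package snippet exercising the helper above; `"median"` is included only to demonstrate the unsupported-function error path:

```go
package elasticsearch_query

import "fmt"

// sketchFunctionAggregations is illustrative only and not part of the plugin.
func sketchFunctionAggregations() {
	for _, fn := range []string{"avg", "sum", "min", "max", "median"} {
		_, err := getFunctionAggregation(fn, "size")
		// The first four succeed; "median" yields
		// "aggregation function 'median' not supported".
		fmt.Printf("%s: err=%v\n", fn, err)
	}
}
```
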
diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go
new file mode 100644
index 0000000000000..009577573a4f3
--- /dev/null
+++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go
@@ -0,0 +1,322 @@
+package elasticsearch_query
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	elastic5 "gopkg.in/olivere/elastic.v5"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/common/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const sampleConfig = `
+  ## The full HTTP endpoint URL for your Elasticsearch instance
+  ## Multiple urls can be specified as part of the same cluster;
+  ## only ONE of the urls will be queried each interval.
+  urls = [ "http://node1.es.example.com:9200" ] # required.
+
+  ## Elasticsearch client timeout, defaults to "5s".
+  # timeout = "5s"
+
+  ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+  ## so it is not necessary to list all nodes in the urls config option
+  # enable_sniffer = false
+
+  ## Set the interval to check if the Elasticsearch nodes are available
+  ## This option is only used if enable_sniffer is also set (0s to disable it)
+  # health_check_interval = "10s"
+
+  ## HTTP basic authentication details (eg. when using x-pack)
+  # username = "telegraf"
+  # password = "mypassword"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  [[inputs.elasticsearch_query.aggregation]]
+    ## measurement name for the results of the aggregation query
+    measurement_name = "measurement"
+
+    ## Elasticsearch indexes to query (wildcards are accepted).
+    index = "index-*"
+
+    ## The date/time field in the Elasticsearch index (mandatory).
+    date_field = "@timestamp"
+
+    ## If the date/time field in Elasticsearch uses a custom date/time format,
+    ## that format may need to be provided here so the field can be parsed
+    ## correctly.
+    ##
+    ## If one of the built-in Elasticsearch formats is used, this is not required.
+    # date_field_custom_format = ""
+
+    ## Time window to query (eg. "1m" to query documents from the last minute).
+    ## Normally this should be set to the same value as the collection interval.
+    query_period = "1m"
+
+    ## Lucene query to filter results
+    # filter_query = "*"
+
+    ## Fields to aggregate values from (must be numeric fields)
+    # metric_fields = ["metric"]
+
+    ## Aggregation function to use on the metric fields
+    ## Must be set if 'metric_fields' is set
+    ## Valid values are: avg, sum, min, max
+    # metric_function = "avg"
+
+    ## Fields to be used as tags
+    ## Must be text, non-analyzed fields. Metric aggregations are performed per tag
+    # tags = ["field.keyword", "field2.keyword"]
+
+    ## Set to true to include documents even when the tag(s) above are missing
+    # include_missing_tag = false
+
+    ## String value of the tag when the tag does not exist
+    ## Used when include_missing_tag is true
+    # missing_tag_value = "null"
+`
+
+// ElasticsearchQuery holds the plugin configuration.
+type ElasticsearchQuery struct {
+	URLs                []string        `toml:"urls"`
+	Username            string          `toml:"username"`
+	Password            string          `toml:"password"`
+	EnableSniffer       bool            `toml:"enable_sniffer"`
+	Timeout             config.Duration `toml:"timeout"`
+	HealthCheckInterval config.Duration `toml:"health_check_interval"`
+	Aggregations        []esAggregation `toml:"aggregation"`
+
+	Log telegraf.Logger `toml:"-"`
+
+	tls.ClientConfig
+	httpclient *http.Client
+	esClient   *elastic5.Client
+}
+
+// esAggregation defines a single aggregation query and its options.
+type esAggregation struct {
+	Index                string          `toml:"index"`
+	MeasurementName      string          `toml:"measurement_name"`
+	DateField            string          `toml:"date_field"`
+	DateFieldFormat      string          `toml:"date_field_custom_format"`
+	QueryPeriod          config.Duration `toml:"query_period"`
+	FilterQuery          string          `toml:"filter_query"`
+	MetricFields         []string        `toml:"metric_fields"`
+	MetricFunction       string          `toml:"metric_function"`
+	Tags                 []string        `toml:"tags"`
+	IncludeMissingTag    bool            `toml:"include_missing_tag"`
+	MissingTagValue      string          `toml:"missing_tag_value"`
+	mapMetricFields      map[string]string
+	aggregationQueryList []aggregationQueryData
+}
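
Because `mapMetricFields` and `aggregationQueryList` are unexported, they are only reachable from inside the package (which is how the tests below drive them). A minimal same-package sketch; pre-seeding `mapMetricFields` stands in for the mapping lookup that `initAggregation` normally performs, and the field/tag names are invented:

```go
package elasticsearch_query

import "fmt"

// sketchBuildQuery is illustrative only and not part of the plugin.
func sketchBuildQuery() error {
	agg := esAggregation{
		MeasurementName: "nginx",
		MetricFields:    []string{"size"},
		MetricFunction:  "avg",
		Tags:            []string{"URI.keyword"},
		mapMetricFields: map[string]string{"size": "long"},
	}

	if err := agg.buildAggregationQuery(); err != nil {
		return err
	}

	for _, q := range agg.aggregationQueryList {
		// Prints "size_avg false" (absorbed by the terms aggregation)
		// and "URI_keyword true" (the remaining parent).
		fmt.Println(q.aggKey.name, q.isParent)
	}
	return nil
}
```
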
+
+// SampleConfig returns the sample configuration for this plugin.
+func (e *ElasticsearchQuery) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns the plugin description.
+func (e *ElasticsearchQuery) Description() string {
+	return `Derive metrics from aggregating Elasticsearch query results`
+}
+
+// Init initializes the plugin.
+func (e *ElasticsearchQuery) Init() error {
+	if e.URLs == nil {
+		return fmt.Errorf("elasticsearch urls is not defined")
+	}
+
+	err := e.connectToES()
+	if err != nil {
+		e.Log.Errorf("error connecting to elasticsearch: %s", err)
+		return nil
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
+	defer cancel()
+
+	for i, agg := range e.Aggregations {
+		if agg.MeasurementName == "" {
+			return fmt.Errorf("field 'measurement_name' is not set")
+		}
+		if agg.DateField == "" {
+			return fmt.Errorf("field 'date_field' is not set")
+		}
+		err = e.initAggregation(ctx, agg, i)
+		if err != nil {
+			e.Log.Errorf("%s", err)
+			return nil
+		}
+	}
+	return nil
+}
+
+func (e *ElasticsearchQuery) initAggregation(ctx context.Context, agg esAggregation, i int) (err error) {
+	// retrieve field mapping and build queries only once
+	agg.mapMetricFields, err = e.getMetricFields(ctx, agg)
+	if err != nil {
+		return fmt.Errorf("unable to retrieve fields: %v", err.Error())
+	}
+
+	for _, metricField := range agg.MetricFields {
+		if _, ok := agg.mapMetricFields[metricField]; !ok {
+			return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index)
+		}
+	}
+
+	err = agg.buildAggregationQuery()
+	if err != nil {
+		return err
+	}
+
+	e.Aggregations[i] = agg
+	return nil
+}
+
+func (e *ElasticsearchQuery) connectToES() error {
+	var clientOptions []elastic5.ClientOptionFunc
+
+	if e.esClient != nil && e.esClient.IsRunning() {
+		return nil
+	}
+
+	if e.httpclient == nil {
+		httpclient, err := e.createHTTPClient()
+		if err != nil {
+			return err
+		}
+		e.httpclient = httpclient
+	}
+
+	clientOptions = append(clientOptions,
+		elastic5.SetHttpClient(e.httpclient),
+		elastic5.SetSniff(e.EnableSniffer),
+		elastic5.SetURL(e.URLs...),
+		elastic5.SetHealthcheckInterval(time.Duration(e.HealthCheckInterval)),
+	)
+
+	if e.Username != "" {
+		clientOptions = append(clientOptions, elastic5.SetBasicAuth(e.Username, e.Password))
+	}
+
+	if time.Duration(e.HealthCheckInterval) == 0 {
+		clientOptions = append(clientOptions, elastic5.SetHealthcheck(false))
+	}
+
+	client, err := elastic5.NewClient(clientOptions...)
+	if err != nil {
+		return err
+	}
+
+	// check for ES version on first node
+	esVersion, err := client.ElasticsearchVersion(e.URLs[0])
+	if err != nil {
+		return fmt.Errorf("elasticsearch version check failed: %s", err)
+	}
+
+	esVersionSplit := strings.Split(esVersion, ".")
+
+	// quit if ES version is not supported
+	if len(esVersionSplit) == 0 {
+		return fmt.Errorf("elasticsearch version check failed")
+	}
+
+	i, err := strconv.Atoi(esVersionSplit[0])
+	if err != nil || i < 5 || i > 6 {
+		return fmt.Errorf("elasticsearch version %s not supported (currently supported versions are 5.x and 6.x)", esVersion)
+	}
+
+	e.esClient = client
+	return nil
+}
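
The version gate at the end of `connectToES` reduces to parsing the major version out of the dotted version string. A stdlib-only sketch of that check; the sample version strings are assumptions for illustration, not values from this PR:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// supported reports whether a dotted Elasticsearch version string has a
// major version the plugin accepts (5.x or 6.x).
func supported(esVersion string) bool {
	parts := strings.Split(esVersion, ".") // never empty for a non-empty separator
	major, err := strconv.Atoi(parts[0])
	return err == nil && major >= 5 && major <= 6
}

func main() {
	for _, v := range []string{"5.6.16", "6.8.23", "7.10.2"} {
		fmt.Println(v, supported(v)) // true, true, false
	}
}
```
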
+
+// Gather writes the results of the queries from Elasticsearch to the Accumulator.
+func (e *ElasticsearchQuery) Gather(acc telegraf.Accumulator) error {
+	var wg sync.WaitGroup
+
+	err := e.connectToES()
+	if err != nil {
+		return err
+	}
+
+	for i, agg := range e.Aggregations {
+		wg.Add(1)
+		go func(agg esAggregation, i int) {
+			defer wg.Done()
+			err := e.esAggregationQuery(acc, agg, i)
+			if err != nil {
+				acc.AddError(fmt.Errorf("elasticsearch query aggregation %s: %s", agg.MeasurementName, err.Error()))
+			}
+		}(agg, i)
+	}
+
+	wg.Wait()
+	return nil
+}
+
+func (e *ElasticsearchQuery) createHTTPClient() (*http.Client, error) {
+	tlsCfg, err := e.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+	tr := &http.Transport{
+		ResponseHeaderTimeout: time.Duration(e.Timeout),
+		TLSClientConfig:       tlsCfg,
+	}
+	httpclient := &http.Client{
+		Transport: tr,
+		Timeout:   time.Duration(e.Timeout),
+	}
+
+	return httpclient, nil
+}
+
+func (e *ElasticsearchQuery) esAggregationQuery(acc telegraf.Accumulator, aggregation esAggregation, i int) error {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
+	defer cancel()
+
+	// try to init the aggregation query if it is not done already
+	if aggregation.aggregationQueryList == nil {
+		err := e.initAggregation(ctx, aggregation, i)
+		if err != nil {
+			return err
+		}
+		aggregation = e.Aggregations[i]
+	}
+
+	searchResult, err := e.runAggregationQuery(ctx, aggregation)
+	if err != nil {
+		return err
+	}
+
+	if searchResult.Aggregations == nil {
+		parseSimpleResult(acc, aggregation.MeasurementName, searchResult)
+		return nil
+	}
+
+	return parseAggregationResult(acc, aggregation.aggregationQueryList, searchResult)
+}
+
+func init() {
+	inputs.Add("elasticsearch_query", func() telegraf.Input {
+		return &ElasticsearchQuery{
+			Timeout:             config.Duration(time.Second * 5),
+			HealthCheckInterval: config.Duration(time.Second * 10),
+		}
+	})
+}
diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go
new file mode 100644
index 0000000000000..e017681b7c58d
--- /dev/null
+++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go
@@ -0,0 +1,730 @@
+package elasticsearch_query
+
+import (
+	"bufio"
+	"context"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+	elastic5 "gopkg.in/olivere/elastic.v5"
+)
+
+var (
+	testindex = "test-elasticsearch_query-" + strconv.Itoa(int(time.Now().Unix()))
+	setupOnce sync.Once
+)
+
+type esAggregationQueryTest struct {
+	queryName                 string
+	testAggregationQueryInput esAggregation
+	testAggregationQueryData  []aggregationQueryData
+	expectedMetrics           []telegraf.Metric
+	wantBuildQueryErr         bool
+	wantGetMetricFieldsErr    bool
+	wantQueryResErr           bool
+}
+
+var queryPeriod = config.Duration(time.Second * 600)
+
+var testEsAggregationData = []esAggregationQueryTest{
+	{
+		"query 1",
+		esAggregation{
+			Index:           testindex,
+			MeasurementName: "measurement1",
+			MetricFields:    []string{"size"},
+			FilterQuery:     "product_1",
+			MetricFunction:  "avg",
+			DateField:       "@timestamp",
+			QueryPeriod:     queryPeriod,
+			Tags:            []string{"URI.keyword"},
+			mapMetricFields: map[string]string{"size": "long"},
+		},
+		[]aggregationQueryData{
+			{
+				aggKey:   aggKey{measurement: "measurement1", name: "size_avg", function: "avg", field: "size"},
+				isParent: false,
+			},
+			{
+				aggKey: 
aggKey{measurement: "measurement1", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement1", + map[string]string{"URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"size_avg": float64(202.30038022813687), "doc_count": int64(263)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 2", + esAggregation{ + Index: testindex, + MeasurementName: "measurement2", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "max", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement2", name: "size_max", function: "max", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement2", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement2", + map[string]string{"URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"size_max": float64(3301), "doc_count": int64(263)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement2", + map[string]string{"URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"size_max": float64(3318), "doc_count": int64(237)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 3", + esAggregation{ + Index: testindex, + MeasurementName: "measurement3", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "sum", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"response.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement3", name: "size_sum", function: "sum", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement3", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "200"}, + map[string]interface{}{"size_sum": float64(22790), "doc_count": int64(22)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "304"}, + map[string]interface{}{"size_sum": float64(0), "doc_count": int64(219)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "404"}, + map[string]interface{}{"size_sum": float64(86932), "doc_count": int64(259)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 4", + esAggregation{ + Index: testindex, + MeasurementName: "measurement4", + MetricFields: []string{"size", "response_time"}, + FilterQuery: "downloads", + MetricFunction: "min", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + IncludeMissingTag: true, + MissingTagValue: "missing", + Tags: []string{"response.keyword", "URI.keyword", "method.keyword"}, + mapMetricFields: map[string]string{"size": "long", "response_time": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement4", name: "size_min", function: "min", field: 
"size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "response_time_min", function: "min", field: "response_time"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "method_keyword", function: "terms", field: "method.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "404", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(318), "response_time_min": float64(126), "doc_count": int64(146)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "304", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(71), "doc_count": int64(113)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(490), "response_time_min": float64(1514), "doc_count": int64(3)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "404", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(318), "response_time_min": float64(237), "doc_count": int64(113)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "304", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(134), "doc_count": int64(106)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(490), "response_time_min": float64(2), "doc_count": int64(13)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1", "method_keyword": "HEAD"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(8479), "doc_count": int64(1)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2", "method_keyword": "HEAD"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(1059), "doc_count": int64(5)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 5", + esAggregation{ + Index: testindex, + MeasurementName: "measurement5", + FilterQuery: "product_2", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{}, + }, + []aggregationQueryData{ + 
{ + aggKey: aggKey{measurement: "measurement5", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement5", + map[string]string{"URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"doc_count": int64(237)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 6", + esAggregation{ + Index: testindex, + MeasurementName: "measurement6", + FilterQuery: "response: 200", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword", "response.keyword"}, + mapMetricFields: map[string]string{}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement6", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement6", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement6", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"doc_count": int64(4)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement6", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"doc_count": int64(18)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 7 - simple query", + esAggregation{ + Index: testindex, + MeasurementName: "measurement7", + FilterQuery: "response: 200", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + []telegraf.Metric{ + testutil.MustMetric( + "measurement7", + map[string]string{}, + map[string]interface{}{"doc_count": int64(22)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 8", + esAggregation{ + Index: testindex, + MeasurementName: "measurement8", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "max", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement8", name: "size_max", function: "max", field: "size"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement8", + map[string]string{}, + map[string]interface{}{"size_max": float64(3318)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 9 - invalid function", + esAggregation{ + Index: testindex, + MeasurementName: "measurement9", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "average", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + nil, + nil, + true, + false, + true, + }, + { + "query 10 - non-existing metric field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement10", + MetricFields: []string{"none"}, + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, + { + "query 11 - non-existing index field", + esAggregation{ + Index: "notanindex", + MeasurementName: 
"measurement11", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, + { + "query 12 - non-existing timestamp field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement12", + MetricFields: []string{"size"}, + MetricFunction: "avg", + DateField: "@notatimestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement12", name: "size_avg", function: "avg", field: "size"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement12", + map[string]string{}, + map[string]interface{}{"size_avg": float64(0)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 13 - non-existing tag field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement13", + MetricFields: []string{"size"}, + MetricFunction: "avg", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + IncludeMissingTag: false, + Tags: []string{"nothere"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement13", name: "size_avg", function: "avg", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement13", name: "nothere", function: "terms", field: "nothere"}, + isParent: true, + }, + }, + nil, + false, + false, + false, + }, + { + "query 14 - non-existing custom date/time format", + esAggregation{ + Index: testindex, + MeasurementName: "measurement14", + DateField: "@timestamp", + DateFieldFormat: "yyyy", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, +} + +func setupIntegrationTest() error { + type nginxlog struct { + IPaddress string `json:"IP"` + Timestamp time.Time `json:"@timestamp"` + Method string `json:"method"` + URI string `json:"URI"` + Httpversion string `json:"http_version"` + Response string `json:"response"` + Size float64 `json:"size"` + ResponseTime float64 `json:"response_time"` + } + + e := &ElasticsearchQuery{ + URLs: []string{"http://" + testutil.GetLocalHost() + ":9200"}, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err := e.connectToES() + if err != nil { + return err + } + + bulkRequest := e.esClient.Bulk() + + // populate elasticsearch with nginx_logs test data file + file, err := os.Open("testdata/nginx_logs") + if err != nil { + return err + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + parts := strings.Split(scanner.Text(), " ") + size, _ := strconv.Atoi(parts[9]) + responseTime, _ := strconv.Atoi(parts[len(parts)-1]) + + logline := nginxlog{ + IPaddress: parts[0], + Timestamp: time.Now().UTC(), + Method: strings.Replace(parts[5], `"`, "", -1), + URI: parts[6], + Httpversion: strings.Replace(parts[7], `"`, "", -1), + Response: parts[8], + Size: float64(size), + ResponseTime: float64(responseTime), + } + + bulkRequest.Add(elastic5.NewBulkIndexRequest(). + Index(testindex). + Type("testquery_data"). 
+			Doc(logline))
+	}
+	if scanner.Err() != nil {
+		return scanner.Err()
+	}
+
+	_, err = bulkRequest.Do(context.Background())
+	if err != nil {
+		return err
+	}
+
+	// wait 5s (default) for Elasticsearch to index, so results are consistent
+	time.Sleep(time.Second * 5)
+	return nil
+}
+
+func TestElasticsearchQuery(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	setupOnce.Do(func() {
+		err := setupIntegrationTest()
+		require.NoError(t, err)
+	})
+
+	var acc testutil.Accumulator
+	e := &ElasticsearchQuery{
+		URLs:    []string{"http://" + testutil.GetLocalHost() + ":9200"},
+		Timeout: config.Duration(time.Second * 30),
+		Log:     testutil.Logger{},
+	}
+
+	err := e.connectToES()
+	require.NoError(t, err)
+
+	var aggs []esAggregation
+	var aggsErr []esAggregation
+
+	for _, agg := range testEsAggregationData {
+		if !agg.wantQueryResErr {
+			aggs = append(aggs, agg.testAggregationQueryInput)
+		}
+	}
+	e.Aggregations = aggs
+
+	require.NoError(t, e.Init())
+	require.NoError(t, e.Gather(&acc))
+
+	if len(acc.Errors) > 0 {
+		t.Errorf("%s", acc.Errors)
+	}
+
+	var expectedMetrics []telegraf.Metric
+	for _, result := range testEsAggregationData {
+		expectedMetrics = append(expectedMetrics, result.expectedMetrics...)
+	}
+	testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime())
+
+	// aggregations that should return an error
+	for _, agg := range testEsAggregationData {
+		if agg.wantQueryResErr {
+			aggsErr = append(aggsErr, agg.testAggregationQueryInput)
+		}
+	}
+	e.Aggregations = aggsErr
+	require.NoError(t, e.Init())
+	require.NoError(t, e.Gather(&acc))
+
+	if len(acc.Errors) != len(aggsErr) {
+		t.Errorf("expecting %v query result errors, got %v: %s", len(aggsErr), len(acc.Errors), acc.Errors)
+	}
+}
+
+func TestElasticsearchQuery_getMetricFields(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	setupOnce.Do(func() {
+		err := setupIntegrationTest()
+		require.NoError(t, err)
+	})
+
+	type args struct {
+		ctx         context.Context
+		aggregation esAggregation
+	}
+
+	e := &ElasticsearchQuery{
+		URLs:    []string{"http://" + testutil.GetLocalHost() + ":9200"},
+		Timeout: config.Duration(time.Second * 30),
+		Log:     testutil.Logger{},
+	}
+
+	err := e.connectToES()
+	require.NoError(t, err)
+
+	type test struct {
+		name    string
+		e       *ElasticsearchQuery
+		args    args
+		want    map[string]string
+		wantErr bool
+	}
+
+	var tests []test
+
+	for _, d := range testEsAggregationData {
+		tests = append(tests, test{
+			"getMetricFields " + d.queryName,
+			e,
+			args{context.Background(), d.testAggregationQueryInput},
+			d.testAggregationQueryInput.mapMetricFields,
+			d.wantGetMetricFieldsErr,
+		})
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := tt.e.getMetricFields(tt.args.ctx, tt.args.aggregation)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ElasticsearchQuery.getMetricFields() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if !cmp.Equal(got, tt.want) {
+				t.Errorf("ElasticsearchQuery.getMetricFields() error = %s", cmp.Diff(got, tt.want))
+			}
+		})
+	}
+}
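
The `buildAggregationQuery` test below compares structs with unexported fields while skipping the live `elastic5.Aggregation` value and tolerating order differences. The same `cmp`/`cmpopts` pattern in isolation, with made-up types standing in for `aggKey`/`aggregationQueryData`:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type key struct{ name string }

type entry struct {
	key     key
	payload interface{} // stands in for the opaque aggregation object
}

func main() {
	got := []entry{{key{"b"}, 1}, {key{"a"}, 2}}
	want := []entry{{key{"a"}, nil}, {key{"b"}, nil}}

	opts := []cmp.Option{
		cmp.AllowUnexported(key{}, entry{}),      // compare unexported fields
		cmpopts.IgnoreFields(entry{}, "payload"), // ignore the opaque value
		cmpopts.SortSlices(func(x, y entry) bool { return x.key.name < y.key.name }),
	}

	fmt.Println(cmp.Equal(got, want, opts...)) // true
}
```
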
+
+func TestElasticsearchQuery_buildAggregationQuery(t *testing.T) {
+	type test struct {
+		name        string
+		aggregation esAggregation
+		want        []aggregationQueryData
+		wantErr     bool
+	}
+	var tests []test
+
+	for _, d := range testEsAggregationData {
+		tests = append(tests, test{
+			"build " + d.queryName,
+			d.testAggregationQueryInput,
+			d.testAggregationQueryData,
+			d.wantBuildQueryErr,
+		})
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.aggregation.buildAggregationQuery()
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ElasticsearchQuery.buildAggregationQuery() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			opts := []cmp.Option{
+				cmp.AllowUnexported(aggKey{}, aggregationQueryData{}),
+				cmpopts.IgnoreFields(aggregationQueryData{}, "aggregation"),
+				cmpopts.SortSlices(func(x, y aggregationQueryData) bool { return x.aggKey.name > y.aggKey.name }),
+			}
+
+			if !cmp.Equal(tt.aggregation.aggregationQueryList, tt.want, opts...) {
+				t.Errorf("ElasticsearchQuery.buildAggregationQuery(): %s error = %s", tt.name, cmp.Diff(tt.aggregation.aggregationQueryList, tt.want, opts...))
+			}
+		})
+	}
+}
diff --git a/plugins/inputs/elasticsearch_query/testdata/nginx_logs b/plugins/inputs/elasticsearch_query/testdata/nginx_logs
new file mode 100644
index 0000000000000..f6e9c8a110226
--- /dev/null
+++ b/plugins/inputs/elasticsearch_query/testdata/nginx_logs
@@ -0,0 +1,500 @@
+93.180.71.3 - - [17/May/2015:08:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 12060
+93.180.71.3 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 12355
+80.91.33.133 - - [17/May/2015:08:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26272
+217.168.17.5 - - [17/May/2015:08:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 1514
+217.168.17.5 - - [17/May/2015:08:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 2204
+93.180.71.3 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 6012
+217.168.17.5 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 11220
+217.168.17.5 - - [17/May/2015:08:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 17843
+80.91.33.133 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 22599
+93.180.71.3 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 24828
+217.168.17.5 - - [17/May/2015:08:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 200 3316 "-" "-" 6947
+188.138.60.101 - - [17/May/2015:08:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28288
+80.91.33.133 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23182
+46.4.66.76 - - [17/May/2015:08:05:45 +0000] "GET /downloads/product_1 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16302
+93.180.71.3 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16102
+91.234.194.89 - - [17/May/2015:08:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20268
+80.91.33.133 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 2794
+37.26.93.214 - - [17/May/2015:08:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 319 "-" "Go 1.1 package http" 22809
+188.138.60.101 - - 
[17/May/2015:08:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8807 +93.180.71.3 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30172 +46.4.66.76 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 1973 +62.75.198.179 - - [17/May/2015:08:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10182 +80.91.33.133 - - [17/May/2015:08:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14307 +173.203.139.108 - - [17/May/2015:08:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10828 +210.245.80.75 - - [17/May/2015:08:05:32 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 21956 +46.4.83.163 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5726 +91.234.194.89 - - [17/May/2015:08:05:18 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10841 +31.22.86.126 - - [17/May/2015:08:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18132 +217.168.17.5 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 200 3301 "-" "-" 10094 +80.91.33.133 - - [17/May/2015:08:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12355 +173.203.139.108 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27325 +80.91.33.133 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14101 +5.83.131.103 - - [17/May/2015:08:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20175 +80.91.33.133 - - [17/May/2015:08:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21384 +200.6.73.40 - - [17/May/2015:08:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6570 +80.91.33.133 - - [17/May/2015:08:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26145 +93.180.71.3 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 32705 +62.75.198.179 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18865 +50.57.209.92 - - [17/May/2015:08:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21639 +188.138.60.101 - - [17/May/2015:08:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31242 +46.4.66.76 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 5910 +50.57.209.92 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22900 +91.239.186.133 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23919 +173.203.139.108 - - [17/May/2015:08:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25169 +80.91.33.133 - - 
[17/May/2015:08:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24395 +93.190.71.150 - - [17/May/2015:08:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25750 +91.234.194.89 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26673 +46.4.83.163 - - [17/May/2015:08:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32509 +173.203.139.108 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32714 +54.187.216.43 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 5016 +50.57.209.92 - - [17/May/2015:08:05:59 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14449 +80.91.33.133 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13183 +173.203.139.108 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 7791 +5.83.131.103 - - [17/May/2015:08:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 586 +173.203.139.108 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5036 +80.91.33.133 - - [17/May/2015:08:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20358 +50.57.209.92 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2106 +80.91.33.133 - - [17/May/2015:08:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9757 +37.26.93.214 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 200 3318 "-" "Go 1.1 package http" 6222 +23.23.226.37 - - [17/May/2015:08:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 200 2578 "-" "urlgrabber/3.9.1 yum/3.4.3" 9523 +93.180.71.3 - - [17/May/2015:08:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 7228 +173.203.139.108 - - [17/May/2015:08:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31464 +62.75.198.179 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22462 +31.22.86.126 - - [17/May/2015:08:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29906 +50.57.209.92 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16217 +91.239.186.133 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18335 +46.4.66.76 - - [17/May/2015:08:05:00 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 27375 +200.6.73.40 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32073 +173.203.139.108 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31071 +93.190.71.150 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1200 +91.234.194.89 - - 
[17/May/2015:08:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13143 +173.203.139.108 - - [17/May/2015:08:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16138 +80.91.33.133 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21432 +217.168.17.5 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 1419 +46.4.83.163 - - [17/May/2015:08:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28449 +80.91.33.133 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25906 +50.57.209.92 - - [17/May/2015:08:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27099 +173.203.139.108 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32238 +188.138.60.101 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 237 +80.91.33.133 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7103 +134.119.20.172 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5423 +173.203.139.108 - - [17/May/2015:08:05:29 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6373 +80.91.33.133 - - [17/May/2015:08:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22230 +91.121.161.213 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14196 +80.91.33.133 - - [17/May/2015:08:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17820 +80.91.33.133 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9097 +37.26.93.214 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Go 1.1 package http" 27632 +5.83.131.103 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14609 +50.57.209.92 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21926 +173.203.139.108 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4915 +54.64.16.235 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 12816 +93.180.71.3 - - [17/May/2015:08:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30742 +202.143.95.26 - - [17/May/2015:08:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24544 +202.143.95.26 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25819 +202.143.95.26 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26831 +80.91.33.133 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 
HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 1344 +91.239.186.133 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4987 +173.203.139.108 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 404 328 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13419 +80.91.33.133 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12879 +87.233.156.242 - - [17/May/2015:08:05:37 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20611 +62.75.198.179 - - [17/May/2015:08:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1387 +50.57.209.92 - - [17/May/2015:08:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31286 +80.91.33.133 - - [17/May/2015:08:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15247 +93.190.71.150 - - [17/May/2015:08:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 134 +46.4.66.76 - - [17/May/2015:08:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 23909 +80.91.33.133 - - [17/May/2015:08:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15771 +91.234.194.89 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4641 +217.168.17.5 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 6382 +46.4.83.163 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14599 +50.57.209.92 - - [17/May/2015:08:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8263 +200.6.73.40 - - [17/May/2015:08:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23514 +91.121.161.213 - - [17/May/2015:08:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29473 +80.91.33.133 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26659 +188.138.60.101 - - [17/May/2015:08:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5147 +144.76.151.58 - - [17/May/2015:08:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21698 +134.119.20.172 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21077 +80.91.33.133 - - [17/May/2015:09:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 7173 +80.91.33.133 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1878 +5.83.131.103 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24451 +93.180.71.3 - - [17/May/2015:09:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30170 +80.91.33.133 - - [17/May/2015:09:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13156 +50.57.209.92 - - 
[17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 306 +5.83.131.103 - - [17/May/2015:09:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 345 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24862 +62.75.167.106 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10227 +37.26.93.214 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Go 1.1 package http" 28504 +93.64.134.186 - - [17/May/2015:09:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27681 +87.233.156.242 - - [17/May/2015:09:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 1502 +80.91.33.133 - - [17/May/2015:09:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18177 +80.91.33.133 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7934 +54.193.30.212 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 2 +62.75.198.179 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23920 +91.239.186.133 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9333 +83.161.14.106 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19640 +80.91.33.133 - - [17/May/2015:09:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11061 +80.91.33.133 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 24501 +93.190.71.150 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15895 +50.57.209.92 - - [17/May/2015:09:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20558 +80.91.33.133 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 2338 +80.91.33.133 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12192 +217.168.17.5 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 9824 +80.91.33.133 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2246 +54.191.136.177 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 7239 +80.91.33.133 - - [17/May/2015:09:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 21154 +91.234.194.89 - - [17/May/2015:09:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2966 +80.91.33.133 - - [17/May/2015:09:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 10715 +80.91.33.133 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14856 +46.4.83.163 - - [17/May/2015:09:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" 
"Debian APT-HTTP/1.3 (0.9.7.9)" 17717 +91.121.161.213 - - [17/May/2015:09:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9951 +188.138.60.101 - - [17/May/2015:09:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25787 +144.76.151.58 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4930 +195.154.77.170 - - [17/May/2015:09:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21921 +50.57.209.92 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29773 +31.22.86.126 - - [17/May/2015:09:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7593 +54.64.16.235 - - [17/May/2015:09:05:51 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 26867 +202.143.95.26 - - [17/May/2015:09:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31361 +202.143.95.26 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13167 +87.233.156.242 - - [17/May/2015:09:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22554 +62.75.167.106 - - [17/May/2015:09:05:37 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29795 +152.90.220.17 - - [17/May/2015:09:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18753 +80.91.33.133 - - [17/May/2015:09:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 27083 +93.180.71.3 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 28187 +80.91.33.133 - - [17/May/2015:09:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25595 +5.83.131.103 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 26070 +5.83.131.103 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27724 +200.6.73.40 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8086 +46.4.88.134 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 4853 +50.57.209.92 - - [17/May/2015:09:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9464 +93.64.134.186 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 12194 +80.91.33.133 - - [17/May/2015:09:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26621 +62.75.198.180 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29857 +80.91.33.133 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 20514 +80.91.33.133 - - [17/May/2015:09:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 5526 
+62.75.198.179 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14143 +80.91.33.133 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 20873 +91.239.186.133 - - [17/May/2015:09:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23230 +80.91.33.133 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25246 +83.161.14.106 - - [17/May/2015:09:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19052 +80.91.33.133 - - [17/May/2015:09:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12362 +195.154.77.170 - - [17/May/2015:09:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10153 +93.190.71.150 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22418 +80.91.33.133 - - [17/May/2015:09:05:43 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 6565 +80.91.33.133 - - [17/May/2015:09:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9883 +144.76.160.62 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 2564 +91.121.161.213 - - [17/May/2015:09:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17140 +46.4.83.163 - - [17/May/2015:09:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22794 +91.234.194.89 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17718 +50.57.209.92 - - [17/May/2015:09:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5434 +188.138.60.101 - - [17/May/2015:09:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 573 +210.245.80.75 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 28482 +144.76.151.58 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31161 +80.91.33.133 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24151 +144.76.117.56 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 6185 +80.91.33.133 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 6276 +31.22.86.126 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 27127 +80.91.33.133 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 9549 +62.75.167.106 - - [17/May/2015:09:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21397 +87.233.156.242 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 10781 +152.90.220.18 - - [17/May/2015:09:05:11 +0000] "GET 
/downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19773 +93.180.71.3 - - [17/May/2015:09:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 11889 +80.91.33.133 - - [17/May/2015:09:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14111 +31.22.86.126 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 319 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17787 +50.57.209.92 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18330 +5.83.131.103 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8993 +46.4.88.134 - - [17/May/2015:09:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17460 +80.91.33.133 - - [17/May/2015:09:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 32412 +80.91.33.133 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12639 +62.75.198.180 - - [17/May/2015:09:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32511 +80.91.33.133 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29012 +80.91.33.133 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9767 +5.83.131.103 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12212 +5.83.131.103 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2440 +5.83.131.103 - - [17/May/2015:09:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8157 +195.154.77.170 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16242 +202.143.95.26 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22261 +93.64.134.186 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15048 +85.214.47.178 - - [17/May/2015:09:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27105 +83.161.14.106 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 32234 +80.70.214.71 - - [17/May/2015:09:05:20 +0000] "HEAD /downloads/product_1 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 8479 +87.233.156.242 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20831 +54.64.16.235 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 18289 +50.57.209.92 - - [17/May/2015:09:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9858 +91.239.186.133 - - [17/May/2015:09:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20442 +91.121.161.213 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 
HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9004 +200.6.73.40 - - [17/May/2015:09:05:30 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13221 +62.75.198.179 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 954 +93.190.71.150 - - [17/May/2015:09:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26398 +80.91.33.133 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22775 +80.91.33.133 - - [17/May/2015:09:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13886 +80.91.33.133 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19340 +144.76.160.62 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17157 +80.91.33.133 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9971 +217.168.17.5 - - [17/May/2015:09:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 26268 +80.91.33.133 - - [17/May/2015:09:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 5983 +80.91.33.133 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15296 +144.76.117.56 - - [17/May/2015:09:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 13922 +144.76.151.58 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10692 +80.91.33.133 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 22550 +62.75.167.106 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20757 +80.91.33.133 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25956 +37.187.238.39 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16674 +80.70.214.71 - - [17/May/2015:10:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 327 "-" "Wget/1.13.4 (linux-gnu)" 15327 +91.234.194.89 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21807 +80.91.33.133 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 20469 +188.138.60.101 - - [17/May/2015:10:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10122 +80.91.33.133 - - [17/May/2015:10:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1971 +80.91.33.133 - - [17/May/2015:10:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7263 +93.180.71.3 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 953 +46.4.88.134 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian 
APT-HTTP/1.3 (1.0.1ubuntu2)" 23703 +80.91.33.133 - - [17/May/2015:10:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 126 +62.210.138.59 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 19171 +31.22.86.126 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31107 +80.91.33.133 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 8252 +54.86.157.236 - - [17/May/2015:10:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 25651 +195.154.233.202 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 3446 +54.86.157.236 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 20770 +80.91.33.133 - - [17/May/2015:10:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27979 +94.23.21.169 - - [17/May/2015:10:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28723 +54.86.157.236 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 13439 +195.154.77.170 - - [17/May/2015:10:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22432 +54.86.157.236 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 1572 +85.214.47.178 - - [17/May/2015:10:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27196 +5.83.131.103 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9637 +5.83.131.103 - - [17/May/2015:10:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 18830 +5.83.131.103 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 844 +5.83.131.103 - - [17/May/2015:10:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20882 +80.91.33.133 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1325 +80.91.33.133 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11125 +84.53.65.28 - - [17/May/2015:10:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10771 +80.91.33.133 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24891 +54.86.157.236 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23541 +217.168.17.5 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 22323 +91.121.161.213 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29114 +80.70.214.71 - - [17/May/2015:10:05:33 +0000] "GET 
/downloads/product_1 HTTP/1.1" 404 329 "-" "Wget/1.13.4 (linux-gnu)" 13629 +144.76.160.62 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32440 +54.86.157.236 - - [17/May/2015:10:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 20402 +93.64.134.186 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5113 +93.190.71.150 - - [17/May/2015:10:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31729 +87.233.156.242 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 28958 +80.91.33.133 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15630 +91.239.186.133 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 7488 +62.75.198.179 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9316 +144.76.117.56 - - [17/May/2015:10:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9965 +178.32.54.253 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2881 +37.187.238.39 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17544 +83.161.14.106 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11419 +54.86.157.236 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 16406 +91.194.188.90 - - [17/May/2015:10:05:51 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 28324 +83.161.14.106 - - [17/May/2015:10:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 1893 +80.91.33.133 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14697 +93.180.71.3 - - [17/May/2015:10:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16168 +62.210.138.59 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 663 +46.4.88.134 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 27962 +202.143.95.26 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18539 +202.143.95.26 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13495 +202.143.95.26 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3192 +62.75.198.180 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4349 +144.76.137.134 - - [17/May/2015:10:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1395 +80.91.33.133 - - [17/May/2015:10:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian 
APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12898 +54.86.157.236 - - [17/May/2015:10:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 26930 +80.70.214.71 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 326 "-" "Wget/1.13.4 (linux-gnu)" 16662 +91.234.194.89 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9445 +188.138.60.101 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18804 +80.91.33.133 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22429 +195.154.233.202 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 8456 +94.23.21.169 - - [17/May/2015:10:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32187 +144.76.151.58 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29276 +80.91.33.133 - - [17/May/2015:10:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9700 +62.75.167.106 - - [17/May/2015:10:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10078 +80.91.33.133 - - [17/May/2015:10:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7600 +50.57.209.92 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8540 +202.143.95.26 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24400 +200.6.73.40 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29363 +195.154.77.170 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17025 +54.187.216.43 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 27997 +80.91.33.133 - - [17/May/2015:10:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1806 +80.91.33.133 - - [17/May/2015:10:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 28234 +54.86.157.236 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 19286 +202.143.95.26 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 325 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19522 +202.143.95.26 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23841 +54.86.157.236 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 31135 +80.91.33.133 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21510 +80.91.33.133 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26977 +80.91.33.133 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian 
APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 1078 +80.91.33.133 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7473 +84.53.65.28 - - [17/May/2015:10:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28347 +92.50.100.22 - - [17/May/2015:10:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8699 +85.214.47.178 - - [17/May/2015:10:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2078 +80.91.33.133 - - [17/May/2015:10:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7013 +54.86.157.236 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 29440 +5.83.131.103 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24206 +37.187.238.39 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 5674 +80.91.33.133 - - [17/May/2015:10:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15781 +195.210.47.239 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 1462 +80.91.33.133 - - [17/May/2015:10:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9446 +54.64.16.235 - - [17/May/2015:10:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23687 +178.32.54.253 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17314 +144.92.16.161 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 4021 +54.86.157.236 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 13168 +87.233.156.242 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 8142 +31.22.86.126 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 28923 +80.91.33.133 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17021 +91.121.161.213 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 711 +80.91.33.133 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15815 +50.57.209.92 - - [17/May/2015:10:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 12290 +91.239.186.133 - - [17/May/2015:10:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9172 +144.76.117.56 - - [17/May/2015:10:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27106 +144.76.160.62 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 2607 +62.210.138.59 - - [17/May/2015:10:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" 
"Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26922 +54.86.157.236 - - [17/May/2015:10:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 2045 +62.75.198.179 - - [17/May/2015:10:05:14 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14090 +93.190.71.150 - - [17/May/2015:10:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2233 +144.76.117.56 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14988 +94.23.21.169 - - [17/May/2015:10:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11645 +91.194.188.90 - - [17/May/2015:10:05:05 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 28064 +93.64.134.186 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16583 +54.86.157.236 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23208 +80.70.214.71 - - [17/May/2015:10:05:23 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 1059 +93.180.71.3 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16367 +195.154.233.202 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26788 +193.192.58.163 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6753 +144.76.137.134 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18307 +54.86.157.236 - - [17/May/2015:11:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 10520 +83.161.14.106 - - [17/May/2015:11:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 5640 +144.76.151.58 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9992 +144.92.16.161 - - [17/May/2015:11:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 3262 +195.154.77.170 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17687 +62.75.198.180 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18911 +91.234.194.89 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22038 +80.91.33.133 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 2238 +188.138.60.101 - - [17/May/2015:11:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10581 +62.75.167.106 - - [17/May/2015:11:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14869 +46.4.88.134 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 6669 +80.91.33.133 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12780 +80.91.33.133 - - 
[17/May/2015:11:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24133 +84.53.65.28 - - [17/May/2015:11:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14350 +152.90.220.17 - - [17/May/2015:11:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23513 +80.91.33.133 - - [17/May/2015:11:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31695 +80.91.33.133 - - [17/May/2015:11:05:21 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12243 +178.32.54.253 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2641 +54.72.39.202 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 27639 +91.120.61.154 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21180 +37.187.238.39 - - [17/May/2015:11:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 30661 +85.214.47.178 - - [17/May/2015:11:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20380 +80.91.33.133 - - [17/May/2015:11:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11957 +5.83.131.103 - - [17/May/2015:11:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19230 +200.6.73.40 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4087 +5.83.131.103 - - [17/May/2015:11:05:45 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 16383 +91.121.161.213 - - [17/May/2015:11:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11487 +91.239.186.133 - - [17/May/2015:11:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11774 +50.57.209.92 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28472 +80.91.33.133 - - [17/May/2015:11:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24011 +144.92.16.161 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 26633 +87.233.156.242 - - [17/May/2015:11:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16170 +94.23.21.169 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15992 +5.83.131.103 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20999 +80.91.33.133 - - [17/May/2015:11:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23097 +202.143.95.26 - - [17/May/2015:11:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3282 +202.143.95.26 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 4869 +80.91.33.133 - - [17/May/2015:11:05:28 +0000] "GET 
/downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 9310 +80.91.33.133 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 23547 +80.91.33.133 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 5516 +80.91.33.133 - - [17/May/2015:11:05:13 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26601 +62.210.138.59 - - [17/May/2015:11:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26830 +144.76.160.62 - - [17/May/2015:11:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 15405 +93.190.71.150 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16982 +80.91.33.133 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 6019 +202.143.95.26 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3822 +193.192.58.163 - - [17/May/2015:11:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13461 +195.154.233.202 - - [17/May/2015:11:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32439 +80.70.214.71 - - [17/May/2015:11:05:59 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 31402 +62.75.198.179 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 452 +80.91.33.133 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25508 +144.92.16.161 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 29252 +195.154.77.170 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19649 +50.57.209.92 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 24457 +144.76.117.56 - - [17/May/2015:11:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 10519 +80.91.33.133 - - [17/May/2015:11:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 6815 +144.76.137.134 - - [17/May/2015:11:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 798 +188.138.60.101 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19441 +54.172.198.124 - - [17/May/2015:11:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 200 2582 "-" "urlgrabber/3.9.1 yum/3.4.3" 17903 +37.187.238.39 - - [17/May/2015:11:05:27 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 3443 +178.32.54.253 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9634 +62.75.198.180 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5417 +62.75.167.106 - - [17/May/2015:11:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian 
APT-HTTP/1.3 (0.9.7.9)" 1055 +195.210.47.239 - - [17/May/2015:11:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 4218 +91.234.194.89 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23355 +31.22.86.126 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29547 +91.194.188.90 - - [17/May/2015:11:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Wget/1.13.4 (linux-gnu)" 26988 +92.50.100.22 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13600 +144.76.151.58 - - [17/May/2015:11:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18988 +93.64.134.186 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2281 +85.214.47.178 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16054 +94.23.21.169 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21647 +80.91.33.133 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31277 +80.91.33.133 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19500 +91.121.161.213 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29579 +83.161.14.106 - - [17/May/2015:11:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 1080 +54.64.16.235 - - [17/May/2015:11:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 15057 +84.53.65.28 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5805 +80.91.33.133 - - [17/May/2015:11:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 32764 +50.57.209.92 - - [17/May/2015:11:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28248 +91.239.186.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32046 +144.92.16.161 - - [17/May/2015:11:05:30 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 31342 +62.210.138.59 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22861 +210.245.80.75 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32649 +80.91.33.133 - - [17/May/2015:11:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11268 +83.161.14.106 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8233 +87.233.156.242 - - [17/May/2015:11:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 10052 +5.83.131.103 - - [17/May/2015:11:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20084 +80.91.33.133 
- - [17/May/2015:11:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9007 +91.120.61.154 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8410 +195.154.233.202 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20582 +80.91.33.133 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8327 +193.192.58.163 - - [17/May/2015:11:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4041 +93.190.71.150 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26973 +144.76.160.62 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 24342 +50.57.209.92 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27744 +62.75.198.179 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2455 +193.192.59.41 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19596 +195.154.77.170 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23424 +80.91.33.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 4171 +200.6.73.40 - - [17/May/2015:11:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8274 +188.138.60.101 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2949 +80.91.33.133 - - [17/May/2015:11:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 5641 +80.91.33.133 - - [17/May/2015:11:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 28746 +80.91.33.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18396 +80.91.33.133 - - [17/May/2015:11:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 17638 +80.91.33.133 - - [17/May/2015:11:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 7865 +144.76.137.134 - - [17/May/2015:11:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4280 +80.70.214.71 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Wget/1.13.4 (linux-gnu)" 32436 +144.76.117.56 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 30048 +94.23.21.169 - - [17/May/2015:11:05:21 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6186 +198.61.216.151 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 21567 +80.91.33.133 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 674 +91.194.188.90 - - [17/May/2015:11:05:32 +0000] "HEAD 
/downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 5354 +62.75.198.180 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5345 +80.91.33.133 - - [17/May/2015:11:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2326 +31.22.86.126 - - [17/May/2015:12:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3114 +84.53.65.28 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9036 +144.92.16.161 - - [17/May/2015:12:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 9410 +50.57.209.92 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2039 +5.83.131.103 - - [17/May/2015:12:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14852 +5.83.131.103 - - [17/May/2015:12:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 71 +62.75.167.106 - - [17/May/2015:12:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6439 +178.32.54.253 - - [17/May/2015:12:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8721 +91.121.161.213 - - [17/May/2015:12:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1795 +91.234.194.89 - - [17/May/2015:12:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8556 +37.187.238.39 - - [17/May/2015:12:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17627 +91.239.186.133 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10970 +87.233.156.242 - - [17/May/2015:12:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 409 +202.143.95.26 - - [17/May/2015:12:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 10283 +144.76.151.58 - - [17/May/2015:12:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22461 +62.210.138.59 - - [17/May/2015:12:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22736 +80.91.33.133 - - [17/May/2015:12:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 21014 +83.161.14.106 - - [17/May/2015:12:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 18047 +80.91.33.133 - - [17/May/2015:12:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25206 +5.83.131.103 - - [17/May/2015:12:05:21 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15330 +80.91.33.133 - - [17/May/2015:12:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 8763 +198.61.216.151 - - [17/May/2015:12:05:59 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11132 +195.154.77.170 - - [17/May/2015:12:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 
338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23768 diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md index 3f397cdfbe36f..333630c958703 100644 --- a/plugins/inputs/ethtool/README.md +++ b/plugins/inputs/ethtool/README.md @@ -1,6 +1,6 @@ # Ethtool Input Plugin -The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver +The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver. ### Configuration: @@ -12,22 +12,31 @@ The ethtool input plugin pulls ethernet device stats. Fields pulled will depend ## List of interfaces to ignore when pulling metrics. # interface_exclude = ["eth1"] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. + ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] ``` -Interfaces can be included or ignored using +Interfaces can be included or ignored using: - `interface_include` - `interface_exclude` -Note that loopback interfaces will be automatically ignored +Note that loopback interfaces will be automatically ignored. ### Metrics: -Metrics are dependant on the network device and driver +Metrics are dependent on the network device and driver. ### Example Output: ``` -ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_que
ue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 -ethtool,driver=igb,host=test02,interface=mgmt0 rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 +ethtool,driver=igb,host=test01,interface=mgmt0 
tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,interface_up=1i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 +ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,interface_up=0i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 ``` diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go index 3f8f8e15618a2..256652640f383 100644 --- a/plugins/inputs/ethtool/ethtool.go +++ b/plugins/inputs/ethtool/ethtool.go @@ -20,6 +20,9 @@ type Ethtool struct { // This is the list of interface names to ignore InterfaceExclude []string `toml:"interface_exclude"` + // Normalization on the key names + NormalizeKeys []string `toml:"normalize_keys"` + Log telegraf.Logger `toml:"-"` // the ethtool command @@ -27,9 +30,10 @@ type Ethtool struct { } const ( - pluginName = "ethtool" - tagInterface = "interface" - tagDriverName = "driver" + pluginName = "ethtool" + tagInterface = "interface" + tagDriverName = "driver" + fieldInterfaceUp = "interface_up" sampleConfig = ` ## List of interfaces to pull metrics for @@ -37,6 +41,15 @@ const ( ## List of interfaces to ignore when pulling metrics. 
   # interface_exclude = ["eth1"]
+
+  ## Some drivers declare statistics with extra whitespace, different spacing,
+  ## and mixed cases. This list, when set, can be used to clean the keys.
+  ## Here are the current possible normalizations:
+  ##  * snakecase: converts fooBarBaz to foo_bar_baz
+  ##  * trim: removes leading and trailing whitespace
+  ##  * lower: changes all capitalized letters to lowercase
+  ##  * underscore: replaces spaces with underscores
+  # normalize_keys = ["snakecase", "trim", "lower", "underscore"]
 `
 )
diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go
index b8c9312cbe309..16081e4cd831a 100644
--- a/plugins/inputs/ethtool/ethtool_linux.go
+++ b/plugins/inputs/ethtool/ethtool_linux.go
@@ -1,24 +1,27 @@
+//go:build linux
 // +build linux

 package ethtool

 import (
 	"net"
+	"regexp"
+	"strings"
 	"sync"

+	"github.com/pkg/errors"
+	ethtoolLib "github.com/safchain/ethtool"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/filter"
 	"github.com/influxdata/telegraf/plugins/inputs"
-	"github.com/pkg/errors"
-	"github.com/safchain/ethtool"
 )

 type CommandEthtool struct {
-	ethtool *ethtool.Ethtool
+	ethtool *ethtoolLib.Ethtool
 }

 func (e *Ethtool) Gather(acc telegraf.Accumulator) error {
-
 	// Get the list of interfaces
 	interfaces, err := e.command.Interfaces()
 	if err != nil {
@@ -35,7 +38,6 @@ func (e *Ethtool) Gather(acc telegraf.Accumulator) error {
 	var wg sync.WaitGroup

 	for _, iface := range interfaces {
-
 		// Check this isn't a loop back and that its matched by the filter
 		if (iface.Flags&net.FlagLoopback == 0) && interfaceFilter.Match(iface.Name) {
 			wg.Add(1)
@@ -59,7 +61,6 @@ func (e *Ethtool) Init() error {

 // Gather the stats for the interface.
 func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulat
 		return
 	}

+	fields[fieldInterfaceUp] = e.interfaceUp(iface)
 	for k, v := range stats {
-		fields[k] = v
+		fields[e.normalizeKey(k)] = v
 	}

 	acc.AddFields(pluginName, fields, tags)
 }

+// normalizeKey cleans up a stat key's name. The order of operations matters:
+// whitespace has to be trimmed before spaces are replaced with underscores
+// (otherwise those underscores could never be trimmed), and camelcase has to
+// be converted to snakecase before lowercasing.
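+//
+// As a rough worked example (the key " FooBar stat " is made up for
+// illustration, not a stat a real driver is known to report): with all four
+// normalizations enabled, snakecase first trims and converts the key to
+// "foo_bar stat", trim and lower are then no-ops, and underscore finally
+// produces "foo_bar_stat".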
+func (e *Ethtool) normalizeKey(key string) string { + // must trim whitespace or this will have a leading _ + if inStringSlice(e.NormalizeKeys, "snakecase") { + key = camelCase2SnakeCase(strings.TrimSpace(key)) + } + // must occur before underscore, otherwise nothing to trim + if inStringSlice(e.NormalizeKeys, "trim") { + key = strings.TrimSpace(key) + } + if inStringSlice(e.NormalizeKeys, "lower") { + key = strings.ToLower(key) + } + if inStringSlice(e.NormalizeKeys, "underscore") { + key = strings.ReplaceAll(key, " ", "_") + } + + return key +} + +func camelCase2SnakeCase(value string) string { + matchFirstCap := regexp.MustCompile("(.)([A-Z][a-z]+)") + matchAllCap := regexp.MustCompile("([a-z0-9])([A-Z])") + + snake := matchFirstCap.ReplaceAllString(value, "${1}_${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}") + return strings.ToLower(snake) +} + +func inStringSlice(slice []string, value string) bool { + for _, item := range slice { + if item == value { + return true + } + } + + return false +} + +func (e *Ethtool) interfaceUp(iface net.Interface) bool { + return (iface.Flags & net.FlagUp) != 0 +} + func NewCommandEthtool() *CommandEthtool { return &CommandEthtool{} } func (c *CommandEthtool) Init() error { - if c.ethtool != nil { return nil } - e, err := ethtool.NewEthtool() + e, err := ethtoolLib.NewEthtool() if err == nil { c.ethtool = e } @@ -114,7 +160,6 @@ func (c *CommandEthtool) Stats(intf string) (map[string]uint64, error) { } func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { - // Get the list of interfaces interfaces, err := net.Interfaces() if err != nil { @@ -125,7 +170,6 @@ func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { } func init() { - inputs.Add(pluginName, func() telegraf.Input { return &Ethtool{ InterfaceInclude: []string{}, diff --git a/plugins/inputs/ethtool/ethtool_notlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go index b022e0a46bb72..ce149ecd6e69c 100644 --- a/plugins/inputs/ethtool/ethtool_notlinux.go +++ b/plugins/inputs/ethtool/ethtool_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ethtool diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index d281644a51ed0..f9573ee054429 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ethtool @@ -6,19 +7,21 @@ import ( "net" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/testutil" ) var command *Ethtool var interfaceMap map[string]*InterfaceMock type InterfaceMock struct { - Name string - DriverName string - Stat map[string]uint64 - LoopBack bool + Name string + DriverName string + Stat map[string]uint64 + LoopBack bool + InterfaceUp bool } type CommandEthtoolMock struct { @@ -30,23 +33,25 @@ func (c *CommandEthtoolMock) Init() error { return nil } -func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err error) { +func (c *CommandEthtoolMock) DriverName(intf string) (string, error) { i := c.InterfaceMap[intf] if i != nil { - driverName = i.DriverName - return + return i.DriverName, nil } - return driverName, errors.New("interface not found") + return "", errors.New("interface not found") } func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { interfaceNames := make([]net.Interface, 0) for k, v := range c.InterfaceMap { - - // Whether to set the flag to loopback 
- flag := net.FlagUp + var flag net.Flags + // When interface is up + if v.InterfaceUp { + flag |= net.FlagUp + } + // For loopback interface if v.LoopBack { - flag = net.FlagLoopback + flag |= net.FlagLoopback } // Create a dummy interface @@ -62,20 +67,19 @@ func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { return interfaceNames, nil } -func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err error) { +func (c *CommandEthtoolMock) Stats(intf string) (map[string]uint64, error) { i := c.InterfaceMap[intf] if i != nil { - stat = i.Stat - return + return i.Stat, nil } - return stat, errors.New("interface not found") + return nil, errors.New("interface not found") } func setup() { - interfaceMap = make(map[string]*InterfaceMock) eth1Stat := map[string]uint64{ + "interface_up": 1, "port_rx_1024_to_15xx": 25167245, "port_rx_128_to_255": 1573526387, "port_rx_15xx_to_jumbo": 137819058, @@ -173,10 +177,11 @@ func setup() { "tx_tso_fallbacks": 0, "tx_tso_long_headers": 0, } - eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false} + eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false, true} interfaceMap[eth1.Name] = eth1 eth2Stat := map[string]uint64{ + "interface_up": 0, "port_rx_1024_to_15xx": 11529312, "port_rx_128_to_255": 1868952037, "port_rx_15xx_to_jumbo": 130339387, @@ -274,14 +279,14 @@ func setup() { "tx_tso_fallbacks": 0, "tx_tso_long_headers": 0, } - eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false} + eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false, false} interfaceMap[eth2.Name] = eth2 // dummy loopback including dummy stat to ensure that the ignore feature is working lo0Stat := map[string]uint64{ "dummy": 0, } - lo0 := &InterfaceMock{"lo0", "", lo0Stat, true} + lo0 := &InterfaceMock{"lo0", "", lo0Stat, true, true} interfaceMap[lo0.Name] = lo0 c := &CommandEthtoolMock{interfaceMap} @@ -301,7 +306,6 @@ func toStringMapInterface(in map[string]uint64) map[string]interface{} { } func TestGather(t *testing.T) { - setup() var acc testutil.Accumulator @@ -324,7 +328,6 @@ func TestGather(t *testing.T) { } func TestGatherIncludeInterfaces(t *testing.T) { - setup() var acc testutil.Accumulator @@ -352,7 +355,6 @@ func TestGatherIncludeInterfaces(t *testing.T) { } func TestGatherIgnoreInterfaces(t *testing.T) { - setup() var acc testutil.Accumulator @@ -377,5 +379,120 @@ func TestGatherIgnoreInterfaces(t *testing.T) { "driver": "driver1", } acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) +} +type TestCase struct { + normalization []string + stats map[string]uint64 + expectedFields map[string]uint64 +} + +func TestNormalizedKeys(t *testing.T) { + cases := []TestCase{ + { + normalization: []string{"underscore"}, + stats: map[string]uint64{ + "port rx": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "_Port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower"}, + stats: map[string]uint64{ + "Port rx": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "_port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower", "trim"}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower", "snakecase", "trim"}, + stats: map[string]uint64{ + " Port RX ": 
1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"snakecase"}, + stats: map[string]uint64{ + " PortRX ": 1, + " PortTX": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + }, + } + for _, c := range cases { + eth0 := &InterfaceMock{"eth0", "e1000e", c.stats, false, true} + expectedTags := map[string]string{ + "interface": eth0.Name, + "driver": eth0.DriverName, + } + + interfaceMap = make(map[string]*InterfaceMock) + interfaceMap[eth0.Name] = eth0 + + cmd := &CommandEthtoolMock{interfaceMap} + command = &Ethtool{ + InterfaceInclude: []string{}, + InterfaceExclude: []string{}, + NormalizeKeys: c.normalization, + command: cmd, + } + + var acc testutil.Accumulator + err := command.Gather(&acc) + + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 1) + + acc.AssertContainsFields(t, pluginName, toStringMapInterface(c.expectedFields)) + acc.AssertContainsTaggedFields(t, pluginName, toStringMapInterface(c.expectedFields), expectedTags) + } } diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index 06c43cf318d39..c0533b513b8bf 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -18,8 +18,6 @@ The main focus for development of this plugin is Azure IoT hub: ## This requires one of the following sets of environment variables to be set: ## ## 1) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" ## - "EVENTHUB_CONNECTION_STRING" ## ## 2) Expected Environment Variables: @@ -28,8 +26,17 @@ The main focus for development of this plugin is Azure IoT hub: ## - "EVENTHUB_KEY_NAME" ## - "EVENTHUB_KEY_VALUE" + ## 3) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "AZURE_TENANT_ID" + ## - "AZURE_CLIENT_ID" + ## - "AZURE_CLIENT_SECRET" + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. + ## If this option is uncommented, environment variables will be ignored. 
+ ## Connection string should contain EventHubName (EntityPath) # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index 17092de3217eb..064502b0ed831 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -6,8 +6,9 @@ import ( "sync" "time" - eventhub "github.com/Azure/azure-event-hubs-go/v3" + eventhubClient "github.com/Azure/azure-event-hubs-go/v3" "github.com/Azure/azure-event-hubs-go/v3/persist" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -54,7 +55,7 @@ type EventHub struct { Log telegraf.Logger `toml:"-"` // Azure - hub *eventhub.Hub + hub *eventhubClient.Hub cancel context.CancelFunc wg sync.WaitGroup @@ -69,8 +70,6 @@ func (*EventHub) SampleConfig() string { ## This requires one of the following sets of environment variables to be set: ## ## 1) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" ## - "EVENTHUB_CONNECTION_STRING" ## ## 2) Expected Environment Variables: @@ -79,8 +78,17 @@ func (*EventHub) SampleConfig() string { ## - "EVENTHUB_KEY_NAME" ## - "EVENTHUB_KEY_VALUE" + ## 3) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "AZURE_TENANT_ID" + ## - "AZURE_CLIENT_ID" + ## - "AZURE_CLIENT_SECRET" + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. + ## If this option is uncommented, environment variables will be ignored. + ## Connection string should contain EventHubName (EntityPath) # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister @@ -165,7 +173,7 @@ func (e *EventHub) Init() (err error) { } // Set hub options - hubOpts := []eventhub.HubOption{} + hubOpts := []eventhubClient.HubOption{} if e.PersistenceDir != "" { persister, err := persist.NewFilePersister(e.PersistenceDir) @@ -173,20 +181,20 @@ func (e *EventHub) Init() (err error) { return err } - hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister)) + hubOpts = append(hubOpts, eventhubClient.HubWithOffsetPersistence(persister)) } if e.UserAgent != "" { - hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent)) + hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(e.UserAgent)) } else { - hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken())) + hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(internal.ProductToken())) } // Create event hub connection if e.ConnectionString != "" { - e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...) + e.hub, err = eventhubClient.NewHubFromConnectionString(e.ConnectionString, hubOpts...) } else { - e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...) + e.hub, err = eventhubClient.NewHubFromEnvironment(hubOpts...) 
} return err @@ -207,11 +215,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { }() // Configure receiver options - receiveOpts, err := e.configureReceiver() - if err != nil { - return err - } - + receiveOpts := e.configureReceiver() partitions := e.PartitionIDs if len(e.PartitionIDs) == 0 { @@ -224,7 +228,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { } for _, partitionID := range partitions { - _, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) + _, err := e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) if err != nil { return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err) } @@ -233,34 +237,34 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { return nil } -func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) { - receiveOpts := []eventhub.ReceiveOption{} +func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption { + receiveOpts := []eventhubClient.ReceiveOption{} if e.ConsumerGroup != "" { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithConsumerGroup(e.ConsumerGroup)) } if !e.FromTimestamp.IsZero() { - receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveFromTimestamp(e.FromTimestamp)) } else if e.Latest { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset()) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithLatestOffset()) } if e.PrefetchCount != 0 { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithPrefetchCount(e.PrefetchCount)) } if e.Epoch != 0 { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithEpoch(e.Epoch)) } - return receiveOpts, nil + return receiveOpts } // OnMessage handles an Event. When this function returns without error the // Event is immediately accepted and the offset is updated. If an error is // returned the Event is marked for redelivery. -func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error { +func (e *EventHub) onMessage(ctx context.Context, event *eventhubClient.Event) error { metrics, err := e.createMetrics(event) if err != nil { return err @@ -342,7 +346,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric { } // CreateMetrics returns the Metrics from the Event. 
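The accept-or-redeliver contract documented above is easiest to see in a stripped-down handler. This is a hedged sketch, not the plugin's actual code path; the payload check and print are hypothetical stand-ins for parsing and delivery, and only the nil-versus-error return matters:

```go
package main

import (
	"context"
	"fmt"

	eventhubClient "github.com/Azure/azure-event-hubs-go/v3"
)

// Sketch of the contract: return nil to accept the event (the offset
// advances), return an error to have the event redelivered.
func handle(_ context.Context, event *eventhubClient.Event) error {
	if len(event.Data) == 0 {
		return fmt.Errorf("empty event") // marked for redelivery
	}
	fmt.Printf("accepted %d bytes\n", len(event.Data)) // hypothetical hand-off
	return nil // accepted; offset updated
}

func main() {
	_ = handle(context.Background(), &eventhubClient.Event{Data: []byte("cpu value=1")})
}
```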
-func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { +func (e *EventHub) createMetrics(event *eventhubClient.Event) ([]telegraf.Metric, error) { metrics, err := e.parser.Parse(event.Data) if err != nil { return nil, err diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/example/README.md similarity index 100% rename from plugins/inputs/EXAMPLE_README.md rename to plugins/inputs/example/README.md diff --git a/plugins/inputs/example/example.go b/plugins/inputs/example/example.go new file mode 100644 index 0000000000000..c8f5992fe660a --- /dev/null +++ b/plugins/inputs/example/example.go @@ -0,0 +1,136 @@ +package example + +import ( + "fmt" + "math/rand" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Example struct should be named the same as the Plugin +type Example struct { + // Example for a mandatory option to set a tag + DeviceName string `toml:"device_name"` + + // Config options are converted to the correct type automatically + NumberFields int64 `toml:"number_fields"` + + // We can also use booleans and have diverging names between user-configuration options and struct members + EnableRandomVariable bool `toml:"enable_random"` + + // Example of passing a duration option allowing the format of e.g. "100ms", "5m" or "1h" + Timeout config.Duration `toml:"timeout"` + + // Telegraf logging facility + // The exact name is important to allow automatic initialization by telegraf. + Log telegraf.Logger `toml:"-"` + + // This is a non-exported internal state. + count int64 +} + +// Usually the default (example) configuration is contained in this constant. +// Please use '## ' to denote comments and '# ' to specify default settings, and start each line with two spaces. +const sampleConfig = ` + ## Device name used as a tag + ## This is a mandatory option that needs to be set by the user, so we do not + ## comment it. + device_name = "" + + ## Number of fields contained in the output + ## This should be greater than zero and less than ten. + ## Here, two is the default, so we comment the option with the default value shown. + # number_fields = 2 + + ## Enable setting the field(s) to random values + ## By default, the field values are set to zero. + # enable_random = false + + ## Specify a duration allowing time-unit suffixes ('ns','ms', 's', 'm', etc.) + # timeout = "100ms" +` + +// Description will appear directly above the plugin definition in the config file +func (m *Example) Description() string { + return `This is an example plugin` +} + +// SampleConfig will populate the sample configuration portion of the plugin's configuration +func (m *Example) SampleConfig() string { + return sampleConfig +} + +// Init can be implemented to do one-time processing stuff like initializing variables +func (m *Example) Init() error { + // Check your options according to your requirements + if m.DeviceName == "" { + return fmt.Errorf("device name cannot be empty") + } + + // Set your defaults. + // Please note: In Go, all fields are initialized to their zero value, so you should not + // set these fields if the zero value is what you want (e.g. for booleans). + if m.NumberFields < 1 { + m.Log.Debugf("Setting number of fields to default from invalid value %d", m.NumberFields) + m.NumberFields = 2 + } + + // Initialize your internal states + m.count = 1 + + return nil +} + +// Gather defines what data the plugin will gather. +func (m *Example) Gather(acc telegraf.Accumulator) error { + // Imagine some completely arbitrary error occurring here + if m.NumberFields > 10 { + return fmt.Errorf("too many fields") + } + + // For illustration we gather three metrics in one go + for run := 0; run < 3; run++ { + // Imagine an error occurs here but you want to keep the other + // metrics. You cannot simply return, as this would drop + // all later metrics; accumulate the error in this case + // and ignore the metric. + if m.EnableRandomVariable && m.DeviceName == "flappy" && run > 1 { + acc.AddError(fmt.Errorf("too many runs for random values")) + continue + } + + // Construct the fields + fields := map[string]interface{}{"count": m.count} + for i := int64(1); i < m.NumberFields; i++ { + name := fmt.Sprintf("field%d", i) + value := 0.0 + if m.EnableRandomVariable { + value = rand.Float64() + } + fields[name] = value + } + + // Construct the tags + tags := map[string]string{"device": m.DeviceName} + + // Add the metric with the current timestamp + acc.AddFields("example", fields, tags) + + m.count++ + } + + return nil +} + +// Register the plugin +func init() { + inputs.Add("example", func() telegraf.Input { + return &Example{ + // Set the default timeout here to distinguish it from the user setting it to zero + Timeout: config.Duration(100 * time.Millisecond), + } + }) +}
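As a quick usage sketch of the Init() contract just defined (the standalone main and the device name "demo" are hypothetical; the import path matches the new plugin package):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/example"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// device_name is mandatory; leaving it empty makes Init() fail.
	// NumberFields is left at its zero value, so Init() falls back to the default of 2.
	plugin := &example.Example{
		DeviceName: "demo",            // hypothetical device
		Log:        testutil.Logger{}, // always set Log to avoid a nil-pointer panic
	}
	if err := plugin.Init(); err != nil {
		panic(err)
	}
	fmt.Println(plugin.NumberFields) // prints 2
}
```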
diff --git a/plugins/inputs/example/example_test.go b/plugins/inputs/example/example_test.go new file mode 100644 index 0000000000000..1c3b4b0a5e66e --- /dev/null +++ b/plugins/inputs/example/example_test.go @@ -0,0 +1,439 @@ +package example + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +// This file should contain a set of unit tests to cover your plugin. This will ease +// spotting bugs and mistakes when later modifying or extending the functionality. +// To do so, please write one 'TestXYZ' function per 'case', e.g. default init, +// things that should fail or expected values from a mockup. + +func TestInitDefault(t *testing.T) { + // This test should succeed with the default initialization. + + // Use whatever you use in the init() function plus the mandatory options. + // ATTENTION: Always initialize the "Log" as you will get SIGSEGV otherwise. + plugin := &Example{ + DeviceName: "test", + Timeout: config.Duration(100 * time.Millisecond), + Log: testutil.Logger{}, + } + + // Test the initialization succeeds + require.NoError(t, plugin.Init()) + + // Also test that default values are set correctly + require.Equal(t, config.Duration(100*time.Millisecond), plugin.Timeout) + require.Equal(t, "test", plugin.DeviceName) + require.Equal(t, int64(2), plugin.NumberFields) +} + +func TestInitFail(t *testing.T) { + // You should also test that your safety nets work, i.e. that you get errors for + // invalid configuration-option values. So check your error paths in Init() + // and verify that you reach them + + // We set up a table-test here to specify "setting" - "expected error" values. + // Even though it seems overkill here for the example plugin, we reuse this structure + // later for checking the metrics + tests := []struct { + name string + plugin *Example + expected string + }{ + { + name: "all empty", + plugin: &Example{}, + expected: "device name cannot be empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Always initialize the logger to avoid SIGSEGV.
This is done automatically by + // telegraf during normal operation. + tt.plugin.Log = testutil.Logger{} + err := tt.plugin.Init() + require.Error(t, err) + require.EqualError(t, err, tt.expected) + }) + } +} + +func TestFixedValue(t *testing.T) { + // You can organize the test e.g. by operation mode (like we do here, random vs. fixed), by features or + // by different metrics gathered. Please choose the partitioning most suited for your plugin. + + // We again set up a table-test here to specify "setting" - "expected output metric" pairs. + tests := []struct { + name string + plugin *Example + expected []telegraf.Metric + }{ + { + name: "count only", + plugin: &Example{ + DeviceName: "test", + NumberFields: 1, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 2, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 3, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "default settings", + plugin: &Example{ + DeviceName: "test", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 2, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 3, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "more fields", + plugin: &Example{ + DeviceName: "test", + NumberFields: 4, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 2, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 3, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + // Call gather and check no error occurs. In case you use acc.AddError() somewhere + // in your code, it is not sufficient to only check the return value of Gather(). + require.NoError(t, tt.plugin.Gather(&acc)) + require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()") + + // Wait for the expected number of metrics to avoid flaky tests due to + // race conditions. + acc.Wait(len(tt.expected)) + + // Compare the metrics in a convenient way. Here we ignore + // the metric time during comparison as we cannot inject the time + // during the test. For more comparison options check the testutil package. + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +}
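When gather order is not deterministic, the same comparison can also be made order-insensitive. A fragment only, assuming testutil.SortMetrics() from the testutil package mentioned above; expected and actual stand for the slices being compared:

```go
// Sketch: cmp options compose, so a comparison that ignores both timestamps
// and metric ordering simply passes both helpers.
testutil.RequireMetricsEqual(t, expected, actual,
	testutil.IgnoreTime(), testutil.SortMetrics())
```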
+ +func TestRandomValue(t *testing.T) { + // Sometimes, you cannot know the exact outcome of the gather cycle, e.g. if the gathering involves random data. + // However, you should nevertheless check the result, applying as many conditions as you can. + + // We again set up a table-test here to specify "setting" - "expected output metric" pairs. + tests := []struct { + name string + plugin *Example + template telegraf.Metric + }{ + { + name: "count only", + plugin: &Example{ + DeviceName: "test", + NumberFields: 1, + EnableRandomVariable: true, + }, + template: testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + }, + time.Unix(0, 0), + ), + }, + { + name: "default settings", + plugin: &Example{ + DeviceName: "test", + EnableRandomVariable: true, + }, + template: testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + }, + { + name: "more fields", + plugin: &Example{ + DeviceName: "test", + NumberFields: 4, + EnableRandomVariable: true, + }, + template: testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + // Call gather and check no error occurs. In case you use acc.AddError() somewhere + // in your code, it is not sufficient to only check the return value of Gather(). + require.NoError(t, tt.plugin.Gather(&acc)) + require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()") + + // Wait for the expected number of metrics to avoid flaky tests due to + // race conditions. + acc.Wait(3) + + // Compare all aspects of the metric that are known to you + for i, m := range acc.GetTelegrafMetrics() { + require.Equal(t, m.Name(), tt.template.Name()) + require.Equal(t, m.Tags(), tt.template.Tags()) + + // Check if all expected fields are there + fields := m.Fields() + for k := range tt.template.Fields() { + if k == "count" { + require.Equal(t, fields["count"], int64(i+1)) + continue + } + _, found := fields[k] + require.Truef(t, found, "field %q not found", k) + } + } + }) + } +} + +func TestGatherFail(t *testing.T) { + // You should also test for error conditions in your Gather() method. Try to cover all error paths. + + // We again set up a table-test here to specify "setting" - "expected error" pairs. + tests := []struct { + name string + plugin *Example + expected string + }{ + { + name: "too many fields", + plugin: &Example{ + DeviceName: "test", + NumberFields: 11, + }, + expected: "too many fields", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + err := tt.plugin.Gather(&acc) + require.Error(t, err) + require.EqualError(t, err, tt.expected) + }) + } +}
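To contrast the hard failure above with the soft, accumulated failure tested next, a condensed sketch reusing the example plugin's own thresholds (the test name is hypothetical):

```go
// Hard failure: Gather returns an error and the cycle aborts.
// Soft failure: Gather returns nil, but errors pile up in acc.Errors
// while successfully gathered metrics are still delivered.
func TestFailureModesSketch(t *testing.T) {
	hard := &Example{DeviceName: "test", NumberFields: 11, Log: testutil.Logger{}}
	require.NoError(t, hard.Init())
	var acc testutil.Accumulator
	require.Error(t, hard.Gather(&acc)) // more than ten fields: hard failure

	soft := &Example{DeviceName: "flappy", NumberFields: 1, EnableRandomVariable: true, Log: testutil.Logger{}}
	require.NoError(t, soft.Init())
	var acc2 testutil.Accumulator
	require.NoError(t, soft.Gather(&acc2)) // Gather itself succeeds...
	require.NotEmpty(t, acc2.Errors)       // ...but errors were accumulated
}
```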
+ +func TestRandomValueFailPartial(t *testing.T) { + // You should also test for error conditions in your Gather() with partial output. This is required when + // using acc.AddError() as Gather() might succeed (return nil) but there are some metrics missing. + + // We again set up a table-test here to specify "setting" - "expected output metric" and "errors". + tests := []struct { + name string + plugin *Example + expected []telegraf.Metric + expectedErr string + }{ + { + name: "flappy gather", + plugin: &Example{ + DeviceName: "flappy", + NumberFields: 1, + EnableRandomVariable: true, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "flappy", + }, + map[string]interface{}{ + "count": 1, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "flappy", + }, + map[string]interface{}{ + "count": 2, + }, + time.Unix(0, 0), + ), + }, + expectedErr: "too many runs for random values", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + // Call gather and check no error occurs. However, we expect an error accumulated by acc.AddError() + require.NoError(t, tt.plugin.Gather(&acc)) + + // Wait for the expected number of metrics to avoid flaky tests due to + // race conditions. + acc.Wait(len(tt.expected)) + + // Check the accumulated errors + require.Len(t, acc.Errors, 1) + require.EqualError(t, acc.Errors[0], tt.expectedErr) + + // Compare the expected partial metrics. + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index cb4420b0f246f..e8ba23db44522 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -3,19 +3,22 @@ package exec import ( "bytes" "fmt" - "os/exec" + "io" + osExec "os/exec" "path/filepath" "runtime" "strings" "sync" "time" + "github.com/kballard/go-shellquote" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/nagios" - "github.com/kballard/go-shellquote" ) const sampleConfig = ` @@ -39,12 +42,12 @@ const sampleConfig = ` data_format = "influx" ` -const MaxStderrBytes = 512 +const MaxStderrBytes int = 512 type Exec struct { - Commands []string - Command string - Timeout internal.Duration + Commands []string `toml:"commands"` + Command string `toml:"command"` + Timeout config.Duration `toml:"timeout"` parser parsers.Parser @@ -55,7 +58,7 @@ type Exec struct { func NewExec() *Exec { return &Exec{ runner: CommandRunner{}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -69,12 +72,12 @@ func (c CommandRunner) Run( command string, timeout time.Duration, ) ([]byte, []byte, error) { - split_cmd, err := shellquote.Split(command) - if err != nil || len(split_cmd) == 0 { + splitCmd, err := shellquote.Split(command) + if err != nil || len(splitCmd) == 0 { return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) } - cmd := exec.Command(split_cmd[0], split_cmd[1:]...) + cmd := osExec.Command(splitCmd[0], splitCmd[1:]...)
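For reference, the go-shellquote tokenization that feeds osExec.Command keeps quoted sections intact; a small sketch with a hypothetical command line:

```go
package main

import (
	"fmt"

	"github.com/kballard/go-shellquote"
)

func main() {
	// Quoted sections survive as single arguments, so commands with
	// spaces in paths or flag values split correctly.
	args, err := shellquote.Split(`/usr/bin/mycollector --config "my config.toml"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", args) // ["/usr/bin/mycollector" "--config" "my config.toml"]
}
```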
var ( out bytes.Buffer @@ -85,16 +88,16 @@ func (c CommandRunner) Run( runErr := internal.RunTimeout(cmd, timeout) - out = removeCarriageReturns(out) - if stderr.Len() > 0 { - stderr = removeCarriageReturns(stderr) - stderr = truncate(stderr) + out = removeWindowsCarriageReturns(out) + if stderr.Len() > 0 && !telegraf.Debug { + stderr = removeWindowsCarriageReturns(stderr) + stderr = c.truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr } -func truncate(buf bytes.Buffer) bytes.Buffer { +func (c CommandRunner) truncate(buf bytes.Buffer) bytes.Buffer { // Limit the number of bytes. didTruncate := false if buf.Len() > MaxStderrBytes { @@ -109,42 +112,36 @@ func truncate(buf bytes.Buffer) bytes.Buffer { buf.Truncate(i) } if didTruncate { + //nolint:errcheck,revive // Will always return nil or panic buf.WriteString("...") } return buf } -// removeCarriageReturns removes all carriage returns from the input if the +// removeWindowsCarriageReturns removes all carriage returns from the input if the // OS is Windows. It does not return any errors. -func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { +func removeWindowsCarriageReturns(b bytes.Buffer) bytes.Buffer { if runtime.GOOS == "windows" { var buf bytes.Buffer for { - byt, er := b.ReadBytes(0x0D) - end := len(byt) - if nil == er { - end -= 1 + byt, err := b.ReadBytes(0x0D) + byt = bytes.TrimRight(byt, "\x0d") + if len(byt) > 0 { + _, _ = buf.Write(byt) } - if nil != byt { - buf.Write(byt[:end]) - } else { - break - } - if nil != er { - break + if err == io.EOF { + return buf } } - b = buf } return b - } func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { defer wg.Done() _, isNagios := e.parser.(*nagios.NagiosParser) - out, errbuf, runErr := e.runner.Run(command, e.Timeout.Duration) + out, errbuf, runErr := e.runner.Run(command, time.Duration(e.Timeout)) if !isNagios && runErr != nil { err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errbuf)) acc.AddError(err) diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index d0fcc71f668e5..d0647476c77ae 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package exec import ( @@ -13,10 +19,7 @@ import ( "github.com/stretchr/testify/require" ) -// Midnight 9/22/2015 -const baseTimeSeconds = 1442905200 - -const validJson = ` +const validJSON = ` { "status": "green", "num_processes": 82, @@ -30,25 +33,11 @@ const validJson = ` "users": [0, 1, 2, 3] }` -const malformedJson = ` +const malformedJSON = ` { "status": "green", ` -const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n" -const lineProtocolEmpty = "" -const lineProtocolShort = "ab" - -const lineProtocolMulti = ` -cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - type CarriageReturnTest struct { input []byte output []byte @@ -86,7 +75,7 @@ func 
newRunnerMock(out []byte, errout []byte, err error) Runner { } } -func (r runnerMock) Run(command string, _ time.Duration) ([]byte, []byte, error) { +func (r runnerMock) Run(_ string, _ time.Duration) ([]byte, []byte, error) { return r.out, r.errout, r.err } @@ -97,7 +86,7 @@ func TestExec(t *testing.T) { }) e := &Exec{ Log: testutil.Logger{}, - runner: newRunnerMock([]byte(validJson), nil, nil), + runner: newRunnerMock([]byte(validJSON), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, } @@ -127,7 +116,7 @@ func TestExecMalformed(t *testing.T) { }) e := &Exec{ Log: testutil.Logger{}, - runner: newRunnerMock([]byte(malformedJson), nil, nil), + runner: newRunnerMock([]byte(malformedJSON), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, } @@ -155,7 +144,7 @@ func TestCommandError(t *testing.T) { } func TestExecCommandWithGlob(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser, _ := parsers.NewValueParser("metric", "string", "", nil) e := NewExec() e.Commands = []string{"/bin/ech* metric_value"} e.SetParser(parser) @@ -171,7 +160,7 @@ func TestExecCommandWithGlob(t *testing.T) { } func TestExecCommandWithoutGlob(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser, _ := parsers.NewValueParser("metric", "string", "", nil) e := NewExec() e.Commands = []string{"/bin/echo metric_value"} e.SetParser(parser) @@ -187,7 +176,7 @@ func TestExecCommandWithoutGlob(t *testing.T) { } func TestExecCommandWithoutGlobAndPath(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser, _ := parsers.NewValueParser("metric", "string", "", nil) e := NewExec() e.Commands = []string{"echo metric_value"} e.SetParser(parser) @@ -212,12 +201,14 @@ func TestTruncate(t *testing.T) { name: "should not truncate", bufF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world") + _, err := b.WriteString("hello world") + require.NoError(t, err) return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world") + _, err := b.WriteString("hello world") + require.NoError(t, err) return &b }, }, @@ -225,12 +216,14 @@ func TestTruncate(t *testing.T) { name: "should truncate up to the new line", bufF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world\nand all the people") + _, err := b.WriteString("hello world\nand all the people") + require.NoError(t, err) return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world...") + _, err := b.WriteString("hello world...") + require.NoError(t, err) return &b }, }, @@ -239,24 +232,26 @@ func TestTruncate(t *testing.T) { bufF: func() *bytes.Buffer { var b bytes.Buffer for i := 0; i < 2*MaxStderrBytes; i++ { - b.WriteByte('b') + require.NoError(t, b.WriteByte('b')) } return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer for i := 0; i < MaxStderrBytes; i++ { - b.WriteByte('b') + require.NoError(t, b.WriteByte('b')) } - b.WriteString("...") + _, err := b.WriteString("...") + require.NoError(t, err) return &b }, }, } + c := CommandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res := truncate(*tt.bufF()) + res := c.truncate(*tt.bufF()) require.Equal(t, tt.expF().Bytes(), res.Bytes()) }) } @@ -267,14 +262,14 @@ func TestRemoveCarriageReturns(t *testing.T) { // Test that all carriage returns are removed for _, test := range crTests { b := bytes.NewBuffer(test.input) - out := removeCarriageReturns(*b) + out := 
removeWindowsCarriageReturns(*b) assert.True(t, bytes.Equal(test.output, out.Bytes())) } } else { // Test that the buffer is returned unaltered for _, test := range crTests { b := bytes.NewBuffer(test.input) - out := removeCarriageReturns(*b) + out := removeWindowsCarriageReturns(*b) assert.True(t, bytes.Equal(test.input, out.Bytes())) } } diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 4d8789a8d3215..a90b1a92dddf5 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package execd @@ -12,7 +13,7 @@ import ( "github.com/influxdata/telegraf" ) -func (e *Execd) Gather(acc telegraf.Accumulator) error { +func (e *Execd) Gather(_ telegraf.Accumulator) error { if e.process == nil || e.process.Cmd == nil { return nil } @@ -23,17 +24,19 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { } switch e.Signal { case "SIGHUP": - osProcess.Signal(syscall.SIGHUP) + return osProcess.Signal(syscall.SIGHUP) case "SIGUSR1": - osProcess.Signal(syscall.SIGUSR1) + return osProcess.Signal(syscall.SIGUSR1) case "SIGUSR2": - osProcess.Signal(syscall.SIGUSR2) + return osProcess.Signal(syscall.SIGUSR2) case "STDIN": if osStdin, ok := e.process.Stdin.(*os.File); ok { - osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + if err := osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)); err != nil { + return fmt.Errorf("setting write deadline failed: %s", err) + } } if _, err := io.WriteString(e.process.Stdin, "\n"); err != nil { - return fmt.Errorf("Error writing to stdin: %s", err) + return fmt.Errorf("writing to stdin failed: %s", err) } case "none": default: diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index a7be617da3a48..a8c8364394480 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -1,5 +1,3 @@ -// +build !windows - package execd import ( @@ -11,17 +9,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/models" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" - - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" ) func TestSettingConfigWorks(t *testing.T) { @@ -142,8 +139,8 @@ func (tm *TestMetricMaker) LogName() string { return tm.Name() } -func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { - return metric +func (tm *TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { + return aMetric } func (tm *TestMetricMaker) Log() telegraf.Logger { @@ -156,24 +153,27 @@ var counter = flag.Bool("counter", false, func TestMain(m *testing.M) { flag.Parse() if *counter { - runCounterProgram() + if err := runCounterProgram(); err != nil { + os.Exit(1) + } os.Exit(0) } code := m.Run() os.Exit(code) } -func runCounterProgram() { +func runCounterProgram() error { i := 0 serializer, err := serializers.NewInfluxSerializer() if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintln(os.Stderr, "ERR InfluxSerializer failed to load") - os.Exit(1) + return err } scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { - metric, _ := metric.New("counter", + m := 
metric.New("counter", map[string]string{}, map[string]interface{}{ "count": i, @@ -182,12 +182,15 @@ func runCounterProgram() { ) i++ - b, err := serializer.Serialize(metric) + b, err := serializer.Serialize(m) if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "ERR %v\n", err) - os.Exit(1) + return err + } + if _, err := fmt.Fprint(os.Stdout, string(b)); err != nil { + return err } - fmt.Fprint(os.Stdout, string(b)) } - + return nil } diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go index 15e6798f2389b..9b1f22204bdc4 100644 --- a/plugins/inputs/execd/execd_windows.go +++ b/plugins/inputs/execd/execd_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package execd diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 2ea0b839b3e2f..cfb54e3ae0708 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/signal" "strings" @@ -26,7 +25,6 @@ import ( type empty struct{} var ( - forever = 100 * 365 * 24 * time.Hour envVarEscaper = strings.NewReplacer( `"`, `\"`, `\`, `\\`, @@ -58,8 +56,7 @@ var ( // New creates a new shim interface func New() *Shim { - fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n", - oldpkg, newpkg) + _, _ = fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n", oldpkg, newpkg) return &Shim{ stdin: os.Stdin, stdout: os.Stdout, @@ -156,7 +153,9 @@ loop: return fmt.Errorf("failed to serialize metric: %s", err) } // Write this to stdout - fmt.Fprint(s.stdout, string(b)) + if _, err := fmt.Fprint(s.stdout, string(b)); err != nil { + return fmt.Errorf("failed to write %q to stdout: %s", string(b), err) + } } } @@ -233,11 +232,17 @@ func (s *Shim) startGathering(ctx context.Context, input telegraf.Input, acc tel return case <-gatherPromptCh: if err := input.Gather(acc); err != nil { - fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err) + if _, perr := fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err); perr != nil { + acc.AddError(err) + acc.AddError(perr) + } } case <-t.C: if err := input.Gather(acc); err != nil { - fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err) + if _, perr := fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err); perr != nil { + acc.AddError(err) + acc.AddError(perr) + } } } } @@ -268,7 +273,7 @@ func LoadConfig(filePath *string) ([]telegraf.Input, error) { return DefaultImportedPlugins() } - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return nil, err } diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go index 4e4a04f141b65..8d7faa2268878 100644 --- a/plugins/inputs/execd/shim/goshim_posix.go +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go index 317f8a2f3d4cb..90adfeff6f6c9 100644 --- a/plugins/inputs/execd/shim/goshim_windows.go +++ b/plugins/inputs/execd/shim/goshim_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package shim diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go index 873ef89bf655f..36e0afcd83167 100644 --- a/plugins/inputs/execd/shim/shim_posix_test.go +++ b/plugins/inputs/execd/shim/shim_posix_test.go 
@@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim @@ -7,7 +8,6 @@ import ( "context" "io" "os" - "runtime" "syscall" "testing" "time" @@ -16,10 +16,6 @@ import ( ) func TestShimUSR1SignalingWorks(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip() - return - } stdinReader, stdinWriter := io.Pipe() stdoutReader, stdoutWriter := io.Pipe() @@ -42,7 +38,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) { return // test is done default: // test isn't done, keep going. - process.Signal(syscall.SIGUSR1) + require.NoError(t, process.Signal(syscall.SIGUSR1)) time.Sleep(200 * time.Millisecond) } } @@ -56,7 +52,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + require.NoError(t, stdinWriter.Close()) readUntilEmpty(r) <-exited diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index dbc3462211222..396928ff44036 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -36,7 +36,8 @@ func TestShimStdinSignalingWorks(t *testing.T) { metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil) - stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) <-metricProcessed @@ -45,7 +46,7 @@ func TestShimStdinSignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + require.NoError(t, stdinWriter.Close()) readUntilEmpty(r) @@ -71,7 +72,7 @@ func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdou shim.stderr = stderr } - shim.AddInput(inp) + require.NoError(t, shim.AddInput(inp)) go func() { err := shim.Run(interval) require.NoError(t, err) @@ -104,7 +105,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(acc telegraf.Accumulator) error { +func (i *testInput) Start(_ telegraf.Accumulator) error { return nil } @@ -112,8 +113,8 @@ func (i *testInput) Stop() { } func TestLoadConfig(t *testing.T) { - os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") - os.Setenv("SECRET_VALUE", `test"\test`) + require.NoError(t, os.Setenv("SECRET_TOKEN", "xxxxxxxxxx")) + require.NoError(t, os.Setenv("SECRET_VALUE", `test"\test`)) inputs.Add("test", func() telegraf.Input { return &serviceInput{} @@ -156,7 +157,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(acc telegraf.Accumulator) error { +func (i *serviceInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/fail2ban/fail2ban_test.go b/plugins/inputs/fail2ban/fail2ban_test.go index b28d824ee3aed..1afac3d789abd 100644 --- a/plugins/inputs/fail2ban/fail2ban_test.go +++ b/plugins/inputs/fail2ban/fail2ban_test.go @@ -92,7 +92,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { return cmd } -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -101,25 +101,37 @@ func TestHelperProcess(t *testing.T) { cmd, args := args[3], args[4:] if !strings.HasSuffix(cmd, "fail2ban-client") { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // os.Exit called intentionally os.Exit(1) } if len(args) == 1 && args[0] == "status" { + //nolint:errcheck,revive // Test will fail anyway 
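The fail2ban test relies on the classic helper-process pattern from os/exec's own test suite: fakeExecCommand re-invokes the test binary so TestHelperProcess can stand in for the real fail2ban-client. Sketched here for orientation under an assumed package name, not as a verbatim copy of the test file:

```go
package fail2bantest // hypothetical package name for the sketch

import (
	"os"
	"os/exec"
)

// Re-invoke the test binary, running only TestHelperProcess, which emits the
// canned fail2ban-client output. The GO_WANT_HELPER_PROCESS variable gates the
// helper so it stays inert during a normal `go test` run.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := []string{"-test.run=TestHelperProcess", "--", command}
	cs = append(cs, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}
```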
fmt.Fprint(os.Stdout, execStatusOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if len(args) == 2 && args[0] == "status" { if args[1] == "sshd" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusSshdOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if args[1] == "postfix" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusPostfixOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if args[1] == "dovecot" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusDovecotOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } } + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "invalid argument") + //nolint:revive // os.Exit called intentionally os.Exit(1) } diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 62889cc8dd6f7..b60eecfa8e2a6 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -37,7 +37,7 @@ type Fibaro struct { Username string `toml:"username"` Password string `toml:"password"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` client *http.Client } @@ -101,7 +101,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -121,13 +121,12 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { // Gather fetches all required information to output metrics func (f *Fibaro) Gather(acc telegraf.Accumulator) error { - if f.client == nil { f.client = &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: f.Timeout.Duration, + Timeout: time.Duration(f.Timeout), } } @@ -160,7 +159,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { for _, device := range devices { // skip device in some cases if device.RoomID == 0 || - device.Enabled == false || + !device.Enabled || device.Properties.Dead == "true" || device.Type == "com.fibaro.zwaveDevice" { continue @@ -222,7 +221,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("fibaro", func() telegraf.Input { return &Fibaro{ - Timeout: internal.Duration{Duration: defaultTimeout}, + Timeout: config.Duration(defaultTimeout), } }) } diff --git a/plugins/inputs/fibaro/fibaro_test.go b/plugins/inputs/fibaro/fibaro_test.go index 32a1447e3ef4d..dac8bc6fdf47a 100644 --- a/plugins/inputs/fibaro/fibaro_test.go +++ b/plugins/inputs/fibaro/fibaro_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -162,7 +161,8 @@ func TestJSONSuccess(t *testing.T) { payload = devicesJSON } w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, payload) + _, err := fmt.Fprintln(w, payload) + require.NoError(t, err) })) defer ts.Close() @@ -178,7 +178,7 @@ func TestJSONSuccess(t *testing.T) { require.NoError(t, err) // Gather should add 5 metrics - 
assert.Equal(t, uint64(5), acc.NMetrics()) + require.Equal(t, uint64(5), acc.NMetrics()) // Ensure fields / values are correct - Device 1 tags := map[string]string{"deviceId": "1", "section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"} diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index e431bc6df9f15..22af282dbde0a 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -2,7 +2,7 @@ package file import ( "fmt" - "io/ioutil" + "io" "os" "path/filepath" @@ -115,7 +115,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { defer file.Close() r, _ := utfbom.Skip(f.decoder.Reader(file)) - fileContents, err := ioutil.ReadAll(r) + fileContents, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("E! Error file: %v could not be read, %s", filename, err) } diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index 427ff25d8c789..ab09753ca1145 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package file import ( @@ -6,16 +12,18 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestRefreshFilePaths(t *testing.T) { wd, err := os.Getwd() + require.NoError(t, err) + r := File{ Files: []string{filepath.Join(wd, "dev/testfiles/**.log")}, } @@ -24,7 +32,7 @@ func TestRefreshFilePaths(t *testing.T) { err = r.refreshFilePaths() require.NoError(t, err) - assert.Equal(t, 2, len(r.filenames)) + require.Equal(t, 2, len(r.filenames)) } func TestFileTag(t *testing.T) { @@ -42,7 +50,7 @@ func TestFileTag(t *testing.T) { DataFormat: "json", } nParser, err := parsers.NewParser(&parserConfig) - assert.NoError(t, err) + require.NoError(t, err) r.parser = nParser err = r.Gather(&acc) @@ -50,8 +58,8 @@ func TestFileTag(t *testing.T) { for _, m := range acc.Metrics { for key, value := range m.Tags { - assert.Equal(t, r.FileTag, key) - assert.Equal(t, filepath.Base(r.Files[0]), value) + require.Equal(t, r.FileTag, key) + require.Equal(t, filepath.Base(r.Files[0]), value) } } } @@ -69,12 +77,12 @@ func TestJSONParserCompile(t *testing.T) { TagKeys: []string{"parent_ignored_child"}, } nParser, err := parsers.NewParser(&parserConfig) - assert.NoError(t, err) + require.NoError(t, err) r.parser = nParser - r.Gather(&acc) - assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) - assert.Equal(t, 5, len(acc.Metrics[0].Fields)) + require.NoError(t, r.Gather(&acc)) + require.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) + require.Equal(t, 5, len(acc.Metrics[0].Fields)) } func TestGrokParser(t *testing.T) { @@ -93,10 +101,11 @@ func TestGrokParser(t *testing.T) { nParser, err := parsers.NewParser(&parserConfig) r.parser = nParser - assert.NoError(t, err) + require.NoError(t, err) err = r.Gather(&acc) - assert.Equal(t, len(acc.Metrics), 2) + require.NoError(t, err) + require.Len(t, acc.Metrics, 2) } func TestCharacterEncoding(t *testing.T) { diff --git a/plugins/inputs/filecount/filecount.go 
b/plugins/inputs/filecount/filecount.go index 30815541c8448..cc72fb348386b 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" "github.com/karrick/godirwalk" @@ -57,19 +57,19 @@ type FileCount struct { Recursive bool RegularOnly bool FollowSymlinks bool - Size internal.Size - MTime internal.Duration `toml:"mtime"` + Size config.Size + MTime config.Duration `toml:"mtime"` fileFilters []fileFilterFunc globPaths []globpath.GlobPath Fs fileSystem Log telegraf.Logger } -func (_ *FileCount) Description() string { +func (fc *FileCount) Description() string { return "Count files in a directory" } -func (_ *FileCount) SampleConfig() string { return sampleConfig } +func (fc *FileCount) SampleConfig() string { return sampleConfig } type fileFilterFunc func(os.FileInfo) (bool, error) @@ -108,7 +108,7 @@ func (fc *FileCount) regularOnlyFilter() fileFilterFunc { } func (fc *FileCount) sizeFilter() fileFilterFunc { - if fc.Size.Size == 0 { + if fc.Size == 0 { return nil } @@ -116,22 +116,22 @@ func (fc *FileCount) sizeFilter() fileFilterFunc { if !f.Mode().IsRegular() { return false, nil } - if fc.Size.Size < 0 { - return f.Size() < -fc.Size.Size, nil + if fc.Size < 0 { + return f.Size() < -int64(fc.Size), nil } - return f.Size() >= fc.Size.Size, nil + return f.Size() >= int64(fc.Size), nil } } func (fc *FileCount) mtimeFilter() fileFilterFunc { - if fc.MTime.Duration == 0 { + if time.Duration(fc.MTime) == 0 { return nil } return func(f os.FileInfo) (bool, error) { - age := absDuration(fc.MTime.Duration) + age := absDuration(time.Duration(fc.MTime)) mtime := time.Now().Add(-age) - if fc.MTime.Duration < 0 { + if time.Duration(fc.MTime) < 0 { return f.ModTime().After(mtime), nil } return f.ModTime().Before(mtime), nil @@ -292,7 +292,6 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) { fc.globPaths = append(fc.globPaths, *glob) } } - } func NewFileCount() *FileCount { @@ -303,8 +302,8 @@ func NewFileCount() *FileCount { Recursive: true, RegularOnly: true, FollowSymlinks: false, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, + Size: config.Size(0), + MTime: config.Duration(0), fileFilters: nil, Fs: osFS{}, } diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 568ee07b5d458..d02c28fb6f170 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filecount import ( @@ -9,7 +15,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -30,7 +36,7 @@ func TestNoFiltersOnChildDir(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", 
int64(600))) } @@ -43,7 +49,7 @@ func TestNoRecursiveButSuperMeta(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(200))) @@ -72,7 +78,7 @@ func TestDoubleAndSimpleStar(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(400))) @@ -90,12 +96,12 @@ func TestRegularOnlyFilter(t *testing.T) { func TestSizeFilter(t *testing.T) { fc := getNoFilterFileCount() - fc.Size = internal.Size{Size: -100} + fc.Size = config.Size(-100) matches := []string{"foo", "bar", "baz", "subdir/quux", "subdir/quuz"} fileCountEquals(t, fc, len(matches), 0) - fc.Size = internal.Size{Size: 100} + fc.Size = config.Size(100) matches = []string{"qux", "subdir/nested2//qux"} fileCountEquals(t, fc, len(matches), 800) @@ -106,14 +112,14 @@ func TestMTimeFilter(t *testing.T) { fileAge := time.Since(mtime) - (60 * time.Second) fc := getNoFilterFileCount() - fc.MTime = internal.Duration{Duration: -fileAge} + fc.MTime = config.Duration(-fileAge) matches := []string{"foo", "bar", "qux", "subdir/", "subdir/quux", "subdir/quuz", "subdir/nested2", "subdir/nested2/qux"} fileCountEquals(t, fc, len(matches), 5096) - fc.MTime = internal.Duration{Duration: fileAge} + fc.MTime = config.Duration(fileAge) matches = []string{"baz"} fileCountEquals(t, fc, len(matches), 0) } @@ -170,8 +176,8 @@ func getNoFilterFileCount() FileCount { Name: "*", Recursive: true, RegularOnly: false, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, + Size: config.Size(0), + MTime: config.Duration(0), fileFilters: nil, Fs: getFakeFileSystem(getTestdataDir()), } @@ -208,31 +214,29 @@ func getFakeFileSystem(basePath string) fakeFileSystem { var dmask uint32 = 0666 // set directory bit - dmask |= (1 << uint(32-1)) + dmask |= 1 << uint(32-1) // create a lookup map for getting "files" from the "filesystem" fileList := map[string]fakeFileInfo{ - basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, - basePath + "/bar": {name: "bar", filemode: uint32(fmask), modtime: mtime}, - basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime}, - basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)}, + basePath: {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/foo": {name: "foo", filemode: fmask, modtime: mtime}, + basePath + "/bar": {name: 
"bar", filemode: fmask, modtime: mtime}, + basePath + "/baz": {name: "baz", filemode: fmask, modtime: olderMtime}, + basePath + "/qux": {name: "qux", size: int64(400), filemode: fmask, modtime: mtime}, + basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/subdir/quux": {name: "quux", filemode: fmask, modtime: mtime}, + basePath + "/subdir/quuz": {name: "quuz", filemode: fmask, modtime: mtime}, + basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/subdir/nested2/qux": {name: "qux", filemode: fmask, modtime: mtime, size: int64(400)}, } - fs := fakeFileSystem{files: fileList} - return fs - + return fakeFileSystem{files: fileList} } func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { tags := map[string]string{"directory": getTestdataDir()} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(expectedCount))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(expectedSize))) } diff --git a/plugins/inputs/filecount/filesystem_helpers.go b/plugins/inputs/filecount/filesystem_helpers.go index 2bd6c095142cf..f43bb4ad5f394 100644 --- a/plugins/inputs/filecount/filesystem_helpers.go +++ b/plugins/inputs/filecount/filesystem_helpers.go @@ -69,5 +69,4 @@ func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) { return fakeInfo, nil } return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")} - } diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 08bb15a2e59cf..a3a3310d3fb4e 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filecount import ( @@ -48,11 +54,12 @@ func TestRealFS(t *testing.T) { fs = getTestFileSystem() // now, the same test as above will return an error as the file doesn't exist in our fake fs expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory" - fileInfo, err = fs.Stat(getTestdataDir() + "/qux") - require.Equal(t, expectedError, err.Error()) + _, err = fs.Stat(getTestdataDir() + "/qux") + require.Error(t, err, expectedError) // and verify that what we DO expect to find, we do fileInfo, err = fs.Stat("/testdata/foo") require.NoError(t, err) + require.NotNil(t, fileInfo) } func getTestFileSystem() fakeFileSystem { @@ -77,14 +84,12 @@ func getTestFileSystem() fakeFileSystem { var dmask uint32 = 0666 // set directory bit - dmask |= (1 << uint(32-1)) + dmask |= 1 << uint(32-1) fileList := map[string]fakeFileInfo{ - "/testdata": {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + "/testdata": {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + "/testdata/foo": {name: "foo", filemode: fmask, modtime: mtime}, } - fs := fakeFileSystem{files: fileList} - return fs - + return fakeFileSystem{files: fileList} } diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index 
bf8ea6c160361..7d1143b74aaed 100644
--- a/plugins/inputs/filestat/filestat.go
+++ b/plugins/inputs/filestat/filestat.go
@@ -35,11 +35,18 @@ type FileStat struct {
 
 	// maps full file paths to globmatch obj
 	globs map[string]*globpath.GlobPath
+
+	// files that were missing - we only log the first time a file is not found.
+	missingFiles map[string]bool
+	// files that had an error in Stat - we only log the first error.
+	filesWithErrors map[string]bool
 }
 
 func NewFileStat() *FileStat {
 	return &FileStat{
-		globs: make(map[string]*globpath.GlobPath),
+		globs:           make(map[string]*globpath.GlobPath),
+		missingFiles:    make(map[string]bool),
+		filesWithErrors: make(map[string]bool),
 	}
 }
 
@@ -85,22 +92,33 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
 		fileInfo, err := os.Stat(fileName)
 		if os.IsNotExist(err) {
 			fields["exists"] = int64(0)
+			acc.AddFields("filestat", fields, tags)
+			if !f.missingFiles[fileName] {
+				f.Log.Warnf("File %q not found", fileName)
+				f.missingFiles[fileName] = true
+			}
+			continue
 		}
+		f.missingFiles[fileName] = false
 
 		if fileInfo == nil {
-			f.Log.Errorf("Unable to get info for file %q, possible permissions issue",
-				fileName)
+			if !f.filesWithErrors[fileName] {
+				f.filesWithErrors[fileName] = true
+				f.Log.Errorf("Unable to get info for file %q: %v",
+					fileName, err)
+			}
 		} else {
+			f.filesWithErrors[fileName] = false
 			fields["size_bytes"] = fileInfo.Size()
 			fields["modification_time"] = fileInfo.ModTime().UnixNano()
 		}
 
 		if f.Md5 {
-			md5, err := getMd5(fileName)
+			md5Hash, err := getMd5(fileName)
 			if err != nil {
 				acc.AddError(err)
 			} else {
-				fields["md5_sum"] = md5
+				fields["md5_sum"] = md5Hash
 			}
 		}
diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go
index a38d3b0aacdc4..ac2a9f9a9f75b 100644
--- a/plugins/inputs/filestat/filestat_test.go
+++ b/plugins/inputs/filestat/filestat_test.go
@@ -1,102 +1,125 @@
+//go:build !windows
+// +build !windows
+
+// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows
+// https://github.com/influxdata/telegraf/issues/6248
+
 package filestat
 
 import (
-	"runtime"
-	"strings"
+	"os"
+	"path/filepath"
 	"testing"
 
 	"github.com/stretchr/testify/require"
 
 	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/assert"
+)
+
+var (
+	testdataDir = getTestdataDir()
 )
 
 func TestGatherNoMd5(t *testing.T) {
-	dir := getTestdataDir()
 	fs := NewFileStat()
fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "log1.log", - dir + "log2.log", - "/non/existant/file", + filepath.Join(testdataDir, "log1.log"), + filepath.Join(testdataDir, "log2.log"), + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0))) } +func TestNonExistentFile(t *testing.T) { + fs := NewFileStat() + fs.Log = testutil.Logger{} + fs.Md5 = true + fs.Files = []string{ + "/non/existant/file", + } + acc := testutil.Accumulator{} + require.NoError(t, acc.GatherError(fs.Gather)) + + acc.AssertContainsFields(t, "filestat", map[string]interface{}{"exists": int64(0)}) + require.False(t, acc.HasField("filestat", "error")) + require.False(t, acc.HasField("filestat", "md5_sum")) + require.False(t, acc.HasField("filestat", "size_bytes")) + require.False(t, acc.HasField("filestat", "modification_time")) +} + func TestGatherGlob(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "*.log", + filepath.Join(testdataDir, "*.log"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) @@ -104,33 +127,32 @@ func TestGatherGlob(t *testing.T) { } func TestGatherSuperAsterisk(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "**", + filepath.Join(testdataDir, "**"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), 
} require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ - "file": dir + "test.conf", + "file": filepath.Join(testdataDir, "test.conf"), } require.True(t, acc.HasPoint("filestat", tags3, "size_bytes", int64(104))) require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(1))) @@ -138,18 +160,17 @@ func TestGatherSuperAsterisk(t *testing.T) { } func TestModificationTime(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - dir + "log1.log", + filepath.Join(testdataDir, "log1.log"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) @@ -160,30 +181,34 @@ func TestNoModificationTime(t *testing.T) { fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - "/non/existant/file", + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(0))) require.False(t, acc.HasInt64Field("filestat", "modification_time")) } func TestGetMd5(t *testing.T) { - dir := getTestdataDir() - md5, err := getMd5(dir + "test.conf") - assert.NoError(t, err) - assert.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) + md5, err := getMd5(filepath.Join(testdataDir, "test.conf")) + require.NoError(t, err) + require.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) - md5, err = getMd5("/tmp/foo/bar/fooooo") - assert.Error(t, err) + _, err = getMd5("/tmp/foo/bar/fooooo") + require.Error(t, err) } func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "filestat_test.go", "testdata/", 1) + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") } diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index a92930aae9598..eba11d6196409 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -8,25 +8,25 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Fireboard gathers statistics from the fireboard.io servers type Fireboard struct { - AuthToken string `toml:"auth_token"` - URL string `toml:"url"` - HTTPTimeout internal.Duration `toml:"http_timeout"` + AuthToken string `toml:"auth_token"` + URL string `toml:"url"` + HTTPTimeout config.Duration `toml:"http_timeout"` client *http.Client } // NewFireboard return a new instance of Fireboard with a default http client func NewFireboard() *Fireboard { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second} client := &http.Client{ Transport: 
tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return &Fireboard{client: client} } @@ -69,26 +69,24 @@ func (r *Fireboard) Description() string { // Init the things func (r *Fireboard) Init() error { - if len(r.AuthToken) == 0 { - return fmt.Errorf("You must specify an authToken") + return fmt.Errorf("you must specify an authToken") } if len(r.URL) == 0 { r.URL = "https://fireboard.io/api/v1/devices.json" } // Have a default timeout of 4s - if r.HTTPTimeout.Duration == 0 { - r.HTTPTimeout.Duration = time.Second * 4 + if r.HTTPTimeout == 0 { + r.HTTPTimeout = config.Duration(time.Second * 4) } - r.client.Timeout = r.HTTPTimeout.Duration + r.client.Timeout = time.Duration(r.HTTPTimeout) return nil } // Gather Reads stats from all configured servers. func (r *Fireboard) Gather(acc telegraf.Accumulator) error { - // Perform the GET request to the fireboard servers req, err := http.NewRequest("GET", r.URL, nil) if err != nil { diff --git a/plugins/inputs/fireboard/fireboard_test.go b/plugins/inputs/fireboard/fireboard_test.go index a5e93a4533e59..8fe1c21bd757d 100644 --- a/plugins/inputs/fireboard/fireboard_test.go +++ b/plugins/inputs/fireboard/fireboard_test.go @@ -16,7 +16,8 @@ func TestFireboard(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 7d4a0cd5eecb4..9ebd1682a56b7 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -3,7 +3,7 @@ package fluentd import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -62,15 +62,12 @@ func parse(data []byte) (datapointArray []pluginData, err error) { var endpointData endpointInfo if err = json.Unmarshal(data, &endpointData); err != nil { - err = fmt.Errorf("Processing JSON structure") - return + err = fmt.Errorf("processing JSON structure") + return nil, err } - for _, point := range endpointData.Payload { - datapointArray = append(datapointArray, point) - } - - return + datapointArray = append(datapointArray, endpointData.Payload...) 
+ return datapointArray, err } // Description - display description @@ -81,21 +78,19 @@ func (h *Fluentd) SampleConfig() string { return sampleConfig } // Gather - Main code responsible for gathering, processing and creating metrics func (h *Fluentd) Gather(acc telegraf.Accumulator) error { - _, err := url.Parse(h.Endpoint) if err != nil { - return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint) + return fmt.Errorf("invalid URL \"%s\"", h.Endpoint) } if h.client == nil { - tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } h.client = client @@ -104,15 +99,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { resp, err := h.client.Get(h.Endpoint) if err != nil { - return fmt.Errorf("Unable to perform HTTP client GET on \"%s\": %s", h.Endpoint, err) + return fmt.Errorf("unable to perform HTTP client GET on \"%s\": %v", h.Endpoint, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return fmt.Errorf("Unable to read the HTTP body \"%s\": %s", string(body), err) + return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) } if resp.StatusCode != http.StatusOK { @@ -122,12 +117,11 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { dataPoints, err := parse(body) if err != nil { - return fmt.Errorf("Problem with parsing") + return fmt.Errorf("problem with parsing") } // Go through all plugins one by one for _, p := range dataPoints { - skip := false // Check if this specific type was excluded in configuration @@ -149,7 +143,6 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { if p.BufferQueueLength != nil { tmpFields["buffer_queue_length"] = *p.BufferQueueLength - } if p.RetryCount != nil { tmpFields["retry_count"] = *p.RetryCount diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index c7699c3384906..a822c763f1402 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -8,8 +8,9 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) // sampleJSON from fluentd version '0.14.9' @@ -96,14 +97,12 @@ const sampleJSON = ` var ( zero float64 - err error - pluginOutput []pluginData expectedOutput = []pluginData{ // {"object:f48698", "dummy", "input", nil, nil, nil}, // {"object:e27138", "dummy", "input", nil, nil, nil}, // {"object:d74060", "monitor_agent", "input", nil, nil, nil}, - {"object:11a5e2c", "stdout", "output", (*float64)(&zero), nil, nil}, - {"object:11237ec", "s3", "output", (*float64)(&zero), (*float64)(&zero), (*float64)(&zero)}, + {"object:11a5e2c", "stdout", "output", &zero, nil, nil}, + {"object:11237ec", "s3", "output", &zero, &zero, &zero}, } fluentdTest = &Fluentd{ Endpoint: "http://localhost:8081", @@ -111,14 +110,12 @@ var ( ) func Test_parse(t *testing.T) { - t.Log("Testing parser function") _, err := parse([]byte(sampleJSON)) if err != nil { t.Error(err) } - } func Test_Gather(t *testing.T) { @@ -126,10 +123,13 @@ func Test_Gather(t *testing.T) { ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, "%s", string(sampleJSON)) + _, err := fmt.Fprintf(w, "%s", string(sampleJSON)) + 
require.NoError(t, err)
 	}))
 
 	requestURL, err := url.Parse(fluentdTest.Endpoint)
+	require.NoError(t, err)
+	require.NotNil(t, requestURL)
 
 	ts.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
@@ -148,16 +148,15 @@ func Test_Gather(t *testing.T) {
 		t.Errorf("acc.HasMeasurement: expected fluentd")
 	}
 
-	assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
-	assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
-	assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
-	assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
-
-	assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
-	assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
-	assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
-	assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
-	assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
-	assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
-
+	require.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
+	require.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
+	require.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
+	require.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
+
+	require.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
+	require.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
+	require.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
+	require.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
+	require.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
+	require.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
 }
diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md
index 46127082e69c5..a920a48f54e1d 100644
--- a/plugins/inputs/github/README.md
+++ b/plugins/inputs/github/README.md
@@ -23,6 +23,14 @@ alternative method for collecting repository information.
 
   ## Timeout for HTTP requests.
   # http_timeout = "5s"
+
+  ## List of additional fields to query.
+  ## NOTE: Getting those fields might involve issuing additional API-calls, so please
+  ## make sure you do not exceed the rate-limit of GitHub.
+  ##
+  ## Available fields are:
+  ##   - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
+  # additional_fields = []
 ```
 
 ### Metrics
@@ -52,11 +60,21 @@ When the [internal][] input is enabled:
 - remaining - How many requests you have remaining (per hour)
 - blocks - How many requests have been blocked due to rate limit
 
+When specifying `additional_fields`, the plugin will collect the specified properties.
+**NOTE:** Querying these additional fields might require additional API-calls.
+Please make sure you don't exceed the query rate-limit by specifying too many additional fields.
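+
+For example, a minimal configuration that opts into the pull-request fields could look like the
+sketch below (the repository name is purely illustrative):
+
+```toml
+[[inputs.github]]
+  ## Illustrative repository; replace with the repositories you want to monitor
+  repositories = ["influxdata/telegraf"]
+
+  ## Enables the optional pull-request counters (2 extra API-calls per repository)
+  additional_fields = ["pull-requests"]
+```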
+The following lists the available options, the required API-calls, and the resulting fields:
+
+- "pull-requests" (2 API-calls per repository)
+  - fields:
+    - open_pull_requests (int)
+    - closed_pull_requests (int)
+
 ### Example Output
 
 ```
 github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000
-internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000
+internal_github,access_token=Unauthenticated closed_pull_requests=3522i,rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i,open_pull_requests=260i 1552653551000000000
 ```
 
 [GitHub]: https://www.github.com
diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go
index a26923f3f305c..31fcc56aecdae 100644
--- a/plugins/inputs/github/github.go
+++ b/plugins/inputs/github/github.go
@@ -8,21 +8,23 @@ import (
 	"sync"
 	"time"
 
-	"github.com/google/go-github/v32/github"
+	githubLib "github.com/google/go-github/v32/github"
+
+	"golang.org/x/oauth2"
+
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/selfstat"
-	"golang.org/x/oauth2"
 )
 
 // GitHub - plugin main structure
 type GitHub struct {
-	Repositories      []string          `toml:"repositories"`
-	AccessToken       string            `toml:"access_token"`
-	EnterpriseBaseURL string            `toml:"enterprise_base_url"`
-	HTTPTimeout       internal.Duration `toml:"http_timeout"`
-	githubClient      *github.Client
+	Repositories      []string        `toml:"repositories"`
+	AccessToken       string          `toml:"access_token"`
+	AdditionalFields  []string        `toml:"additional_fields"`
+	EnterpriseBaseURL string          `toml:"enterprise_base_url"`
+	HTTPTimeout       config.Duration `toml:"http_timeout"`
+	githubClient      *githubLib.Client
 
 	obfuscatedToken string
@@ -46,6 +48,14 @@ const sampleConfig = `
 
   ## Timeout for HTTP requests.
   # http_timeout = "5s"
+
+  ## List of additional fields to query.
+  ## NOTE: Getting those fields might involve issuing additional API-calls, so please
+  ## make sure you do not exceed the rate-limit of GitHub.
+  ##
+  ## Available fields are:
+  ##   - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
+  # additional_fields = []
 `
 
 // SampleConfig returns sample configuration for this plugin.
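The new `additional_fields` option is consumed in `Gather` through a per-field dispatch, shown in the hunks below. Here is a minimal, self-contained sketch of that pattern, assuming a map of collector functions in place of the plugin's actual `switch` statement; the hard-coded counts merely stand in for the two search-API calls that `getPullRequestFields` performs:

```go
package main

import "fmt"

// fieldCollector returns the extra metric fields for one repository.
// In the plugin, the only collector is getPullRequestFields, which issues
// two search-API calls (one for open and one for closed pull requests).
type fieldCollector func(owner, repo string) (map[string]interface{}, error)

// gatherAdditional mimics the dispatch loop added to Gather: every requested
// name selects a collector, unknown names are reported as errors, and the
// collected values are merged into a single field set.
func gatherAdditional(owner, repo string, requested []string, collectors map[string]fieldCollector) (map[string]interface{}, []error) {
	fields := make(map[string]interface{})
	var errs []error
	for _, name := range requested {
		collect, ok := collectors[name]
		if !ok {
			errs = append(errs, fmt.Errorf("unknown additional field %q", name))
			continue
		}
		extra, err := collect(owner, repo)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		for k, v := range extra {
			fields[k] = v
		}
	}
	return fields, errs
}

func main() {
	collectors := map[string]fieldCollector{
		// Stand-in for getPullRequestFields; the hard-coded numbers are
		// illustrative only.
		"pull-requests": func(owner, repo string) (map[string]interface{}, error) {
			return map[string]interface{}{
				"open_pull_requests":   260,
				"closed_pull_requests": 3522,
			}, nil
		},
	}

	fields, errs := gatherAdditional("influxdata", "telegraf", []string{"pull-requests"}, collectors)
	fmt.Println(fields, errs)
}
```

As in the plugin, errors are accumulated instead of aborting the loop, so a single unknown field name does not prevent the remaining fields from being gathered.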
@@ -59,12 +69,12 @@ func (g *GitHub) Description() string { } // Create GitHub Client -func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) { +func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, error) { httpClient := &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: g.HTTPTimeout.Duration, + Timeout: time.Duration(g.HTTPTimeout), } g.obfuscatedToken = "Unauthenticated" @@ -84,11 +94,11 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) return g.newGithubClient(httpClient) } -func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) { +func (g *GitHub) newGithubClient(httpClient *http.Client) (*githubLib.Client, error) { if g.EnterpriseBaseURL != "" { - return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) + return githubLib.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) } - return github.NewClient(httpClient), nil + return githubLib.NewClient(httpClient), nil } // Gather GitHub Metrics @@ -97,7 +107,6 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { if g.githubClient == nil { githubClient, err := g.createGitHubClient(ctx) - if err != nil { return err } @@ -127,23 +136,35 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { } repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository) - - if _, ok := err.(*github.RateLimitError); ok { - g.RateLimitErrors.Incr(1) - } - + g.handleRateLimit(response, err) if err != nil { acc.AddError(err) return } - g.RateLimit.Set(int64(response.Rate.Limit)) - g.RateRemaining.Set(int64(response.Rate.Remaining)) - now := time.Now() tags := getTags(repositoryInfo) fields := getFields(repositoryInfo) + for _, field := range g.AdditionalFields { + switch field { + case "pull-requests": + // Pull request properties + addFields, err := g.getPullRequestFields(ctx, owner, repository) + if err != nil { + acc.AddError(err) + continue + } + + for k, v := range addFields { + fields[k] = v + } + default: + acc.AddError(fmt.Errorf("unknown additional field %q", field)) + continue + } + } + acc.AddFields("github_repository", fields, tags, now) }(repository, acc) } @@ -152,7 +173,16 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { return nil } -func splitRepositoryName(repositoryName string) (string, string, error) { +func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) { + if err == nil { + g.RateLimit.Set(int64(response.Rate.Limit)) + g.RateRemaining.Set(int64(response.Rate.Remaining)) + } else if _, ok := err.(*githubLib.RateLimitError); ok { + g.RateLimitErrors.Incr(1) + } +} + +func splitRepositoryName(repositoryName string) (owner string, repository string, err error) { splits := strings.SplitN(repositoryName, "/", 2) if len(splits) != 2 { @@ -162,7 +192,7 @@ func splitRepositoryName(repositoryName string) (string, string, error) { return splits[0], splits[1], nil } -func getLicense(rI *github.Repository) string { +func getLicense(rI *githubLib.Repository) string { if licenseName := rI.GetLicense().GetName(); licenseName != "" { return licenseName } @@ -170,7 +200,7 @@ func getLicense(rI *github.Repository) string { return "None" } -func getTags(repositoryInfo *github.Repository) map[string]string { +func getTags(repositoryInfo *githubLib.Repository) map[string]string { return map[string]string{ "owner": repositoryInfo.GetOwner().GetLogin(), "name": repositoryInfo.GetName(), @@ -179,7 +209,7 @@ func 
getTags(repositoryInfo *github.Repository) map[string]string { } } -func getFields(repositoryInfo *github.Repository) map[string]interface{} { +func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} { return map[string]interface{}{ "stars": repositoryInfo.GetStargazersCount(), "subscribers": repositoryInfo.GetSubscribersCount(), @@ -191,10 +221,36 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} { } } +func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) { + options := githubLib.SearchOptions{ + TextMatch: false, + ListOptions: githubLib.ListOptions{ + PerPage: 100, + Page: 1, + }, + } + + classes := []string{"open", "closed"} + fields := make(map[string]interface{}) + for _, class := range classes { + q := fmt.Sprintf("repo:%s/%s is:pr is:%s", owner, repo, class) + searchResult, response, err := g.githubClient.Search.Issues(ctx, q, &options) + g.handleRateLimit(response, err) + if err != nil { + return fields, err + } + + f := fmt.Sprintf("%s_pull_requests", class) + fields[f] = searchResult.GetTotal() + } + + return fields, nil +} + func init() { inputs.Add("github", func() telegraf.Input { return &GitHub{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index 7387e566dd21b..aa940f76d4e14 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -17,7 +17,7 @@ It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64- username = "cisco" password = "cisco" - ## gNMI encoding requested (one of: "proto", "json", "json_ietf") + ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") # encoding = "proto" ## redial in case of failures after diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 3c5826ba40033..a6a3c3a2c6ef3 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -14,16 +14,17 @@ import ( "sync" "time" + gnmiLib "github.com/openconfig/gnmi/proto/gnmi" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" - "github.com/openconfig/gnmi/proto/gnmi" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" ) // gNMI plugin instance @@ -44,17 +45,17 @@ type GNMI struct { Password string // Redial - Redial internal.Duration + Redial config.Duration // GRPC TLS settings EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig // Internal state - aliases map[string]string - acc telegraf.Accumulator - cancel context.CancelFunc - wg sync.WaitGroup + internalAliases map[string]string + acc telegraf.Accumulator + cancel context.CancelFunc + wg sync.WaitGroup Log telegraf.Logger } @@ -66,12 +67,12 @@ type Subscription struct { Path string // Subscription mode and interval - SubscriptionMode string `toml:"subscription_mode"` - SampleInterval internal.Duration `toml:"sample_interval"` + SubscriptionMode string `toml:"subscription_mode"` + SampleInterval config.Duration `toml:"sample_interval"` // Duplicate suppression 
- SuppressRedundant bool `toml:"suppress_redundant"` - HeartbeatInterval internal.Duration `toml:"heartbeat_interval"` + SuppressRedundant bool `toml:"suppress_redundant"` + HeartbeatInterval config.Duration `toml:"heartbeat_interval"` } // Start the http listener service @@ -79,14 +80,14 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { var err error var ctx context.Context var tlscfg *tls.Config - var request *gnmi.SubscribeRequest + var request *gnmiLib.SubscribeRequest c.acc = acc ctx, c.cancel = context.WithCancel(context.Background()) // Validate configuration if request, err = c.newSubscribeRequest(); err != nil { return err - } else if c.Redial.Duration.Nanoseconds() <= 0 { + } else if time.Duration(c.Redial).Nanoseconds() <= 0 { return fmt.Errorf("redial duration must be positive") } @@ -102,9 +103,9 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { } // Invert explicit alias list and prefill subscription names - c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) + c.internalAliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) for _, subscription := range c.Subscriptions { - var gnmiLongPath, gnmiShortPath *gnmi.Path + var gnmiLongPath, gnmiShortPath *gnmiLib.Path // Build the subscription path without keys if gnmiLongPath, err = parsePath(subscription.Origin, subscription.Path, ""); err != nil { @@ -114,8 +115,14 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { return err } - longPath, _ := c.handlePath(gnmiLongPath, nil, "") - shortPath, _ := c.handlePath(gnmiShortPath, nil, "") + longPath, _, err := c.handlePath(gnmiLongPath, nil, "") + if err != nil { + return fmt.Errorf("handling long-path failed: %v", err) + } + shortPath, _, err := c.handlePath(gnmiShortPath, nil, "") + if err != nil { + return fmt.Errorf("handling short-path failed: %v", err) + } name := subscription.Name // If the user didn't provide a measurement name, use last path element @@ -123,12 +130,12 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { name = path.Base(shortPath) } if len(name) > 0 { - c.aliases[longPath] = name - c.aliases[shortPath] = name + c.internalAliases[longPath] = name + c.internalAliases[shortPath] = name } } - for alias, path := range c.Aliases { - c.aliases[path] = alias + for alias, encodingPath := range c.Aliases { + c.internalAliases[encodingPath] = alias } // Create a goroutine for each device, dial and subscribe @@ -143,7 +150,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { select { case <-ctx.Done(): - case <-time.After(c.Redial.Duration): + case <-time.After(time.Duration(c.Redial)): } } }(addr) @@ -152,24 +159,24 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { } // Create a new gNMI SubscribeRequest -func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { +func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) { // Create subscription objects - subscriptions := make([]*gnmi.Subscription, len(c.Subscriptions)) + subscriptions := make([]*gnmiLib.Subscription, len(c.Subscriptions)) for i, subscription := range c.Subscriptions { gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") if err != nil { return nil, err } - mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] + mode, ok := gnmiLib.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] if !ok { return nil, fmt.Errorf("invalid subscription mode %s", subscription.SubscriptionMode) } - subscriptions[i] = &gnmi.Subscription{ + 
subscriptions[i] = &gnmiLib.Subscription{ Path: gnmiPath, - Mode: gnmi.SubscriptionMode(mode), - SampleInterval: uint64(subscription.SampleInterval.Duration.Nanoseconds()), + Mode: gnmiLib.SubscriptionMode(mode), + SampleInterval: uint64(time.Duration(subscription.SampleInterval).Nanoseconds()), SuppressRedundant: subscription.SuppressRedundant, - HeartbeatInterval: uint64(subscription.HeartbeatInterval.Duration.Nanoseconds()), + HeartbeatInterval: uint64(time.Duration(subscription.HeartbeatInterval).Nanoseconds()), } } @@ -179,16 +186,16 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { return nil, err } - if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" { + if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" && c.Encoding != "bytes" { return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) } - return &gnmi.SubscribeRequest{ - Request: &gnmi.SubscribeRequest_Subscribe{ - Subscribe: &gnmi.SubscriptionList{ + return &gnmiLib.SubscribeRequest{ + Request: &gnmiLib.SubscribeRequest_Subscribe{ + Subscribe: &gnmiLib.SubscriptionList{ Prefix: gnmiPath, - Mode: gnmi.SubscriptionList_STREAM, - Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]), + Mode: gnmiLib.SubscriptionList_STREAM, + Encoding: gnmiLib.Encoding(gnmiLib.Encoding_value[strings.ToUpper(c.Encoding)]), Subscription: subscriptions, UpdatesOnly: c.UpdatesOnly, }, @@ -197,7 +204,7 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { } // SubscribeGNMI and extract telemetry data -func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error { +func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmiLib.SubscribeRequest) error { var opt grpc.DialOption if tlscfg != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)) @@ -211,7 +218,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co } defer client.Close() - subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx) + subscribeClient, err := gnmiLib.NewGNMIClient(client).Subscribe(ctx) if err != nil { return fmt.Errorf("failed to setup subscription: %v", err) } @@ -227,7 +234,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co c.Log.Debugf("Connection to gNMI device %s established", address) defer c.Log.Debugf("Connection to gNMI device %s closed", address) for ctx.Err() == nil { - var reply *gnmi.SubscribeResponse + var reply *gnmiLib.SubscribeResponse if reply, err = subscribeClient.Recv(); err != nil { if err != io.EOF && ctx.Err() == nil { return fmt.Errorf("aborted gNMI subscription: %v", err) @@ -240,21 +247,27 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co return nil } -// HandleSubscribeResponse message from gNMI and parse contained telemetry data -func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) { - // Check if response is a gNMI Update and if we have a prefix to derive the measurement name - response, ok := reply.Response.(*gnmi.SubscribeResponse_Update) - if !ok { - return +func (c *GNMI) handleSubscribeResponse(address string, reply *gnmiLib.SubscribeResponse) { + switch response := reply.Response.(type) { + case *gnmiLib.SubscribeResponse_Update: + c.handleSubscribeResponseUpdate(address, response) + case *gnmiLib.SubscribeResponse_Error: + c.Log.Errorf("Subscribe error (%d), %q", 
response.Error.Code, response.Error.Message) } +} +// Handle SubscribeResponse_Update message from gNMI and parse contained telemetry data +func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmiLib.SubscribeResponse_Update) { var prefix, prefixAliasPath string grouper := metric.NewSeriesGrouper() timestamp := time.Unix(0, response.Update.Timestamp) prefixTags := make(map[string]string) if response.Update.Prefix != nil { - prefix, prefixAliasPath = c.handlePath(response.Update.Prefix, prefixTags, "") + var err error + if prefix, prefixAliasPath, err = c.handlePath(response.Update.Prefix, prefixTags, ""); err != nil { + c.Log.Errorf("handling path %q failed: %v", response.Update.Prefix, err) + } } prefixTags["source"], _, _ = net.SplitHostPort(address) prefixTags["path"] = prefix @@ -277,7 +290,7 @@ func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResp // Lookup alias if alias-path has changed if aliasPath != lastAliasPath { name = prefix - if alias, ok := c.aliases[aliasPath]; ok { + if alias, ok := c.internalAliases[aliasPath]; ok { name = alias } else { c.Log.Debugf("No measurement alias for gNMI path: %s", name) @@ -287,11 +300,11 @@ func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResp // Group metrics for k, v := range fields { key := k - if len(aliasPath) < len(key) { + if len(aliasPath) < len(key) && len(aliasPath) != 0 { // This may not be an exact prefix, due to naming style // conversion on the key. key = key[len(aliasPath)+1:] - } else { + } else if len(aliasPath) >= len(key) { // Otherwise use the last path element as the field key. key = path.Base(key) @@ -304,55 +317,60 @@ func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResp } } - grouper.Add(name, tags, timestamp, key, v) + if err := grouper.Add(name, tags, timestamp, key, v); err != nil { + c.Log.Errorf("cannot add to grouper: %v", err) + } } lastAliasPath = aliasPath } // Add grouped measurements - for _, metric := range grouper.Metrics() { - c.acc.AddMetric(metric) + for _, metricToAdd := range grouper.Metrics() { + c.acc.AddMetric(metricToAdd) } } // HandleTelemetryField and add it to a measurement -func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { - path, aliasPath := c.handlePath(update.Path, tags, prefix) +func (c *GNMI) handleTelemetryField(update *gnmiLib.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { + gpath, aliasPath, err := c.handlePath(update.Path, tags, prefix) + if err != nil { + c.Log.Errorf("handling path %q failed: %v", update.Path, err) + } var value interface{} var jsondata []byte // Make sure a value is actually set if update.Val == nil || update.Val.Value == nil { - c.Log.Infof("Discarded empty or legacy type value with path: %q", path) + c.Log.Infof("Discarded empty or legacy type value with path: %q", gpath) return aliasPath, nil } switch val := update.Val.Value.(type) { - case *gnmi.TypedValue_AsciiVal: + case *gnmiLib.TypedValue_AsciiVal: value = val.AsciiVal - case *gnmi.TypedValue_BoolVal: + case *gnmiLib.TypedValue_BoolVal: value = val.BoolVal - case *gnmi.TypedValue_BytesVal: + case *gnmiLib.TypedValue_BytesVal: value = val.BytesVal - case *gnmi.TypedValue_DecimalVal: + case *gnmiLib.TypedValue_DecimalVal: value = float64(val.DecimalVal.Digits) / math.Pow(10, float64(val.DecimalVal.Precision)) - case *gnmi.TypedValue_FloatVal: + case *gnmiLib.TypedValue_FloatVal: value = 
val.FloatVal - case *gnmi.TypedValue_IntVal: + case *gnmiLib.TypedValue_IntVal: value = val.IntVal - case *gnmi.TypedValue_StringVal: + case *gnmiLib.TypedValue_StringVal: value = val.StringVal - case *gnmi.TypedValue_UintVal: + case *gnmiLib.TypedValue_UintVal: value = val.UintVal - case *gnmi.TypedValue_JsonIetfVal: + case *gnmiLib.TypedValue_JsonIetfVal: jsondata = val.JsonIetfVal - case *gnmi.TypedValue_JsonVal: + case *gnmiLib.TypedValue_JsonVal: jsondata = val.JsonVal } - name := strings.Replace(path, "-", "_", -1) + name := strings.Replace(gpath, "-", "_", -1) fields := make(map[string]interface{}) if value != nil { fields[name] = value @@ -361,32 +379,41 @@ func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, c.acc.AddError(fmt.Errorf("failed to parse JSON value: %v", err)) } else { flattener := jsonparser.JSONFlattener{Fields: fields} - flattener.FullFlattenJSON(name, value, true, true) + if err := flattener.FullFlattenJSON(name, value, true, true); err != nil { + c.acc.AddError(fmt.Errorf("failed to flatten JSON: %v", err)) + } } } return aliasPath, fields } // Parse path to path-buffer and tag-field -func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string) { - var aliasPath string +func (c *GNMI) handlePath(gnmiPath *gnmiLib.Path, tags map[string]string, prefix string) (pathBuffer string, aliasPath string, err error) { builder := bytes.NewBufferString(prefix) // Prefix with origin - if len(path.Origin) > 0 { - builder.WriteString(path.Origin) - builder.WriteRune(':') + if len(gnmiPath.Origin) > 0 { + if _, err := builder.WriteString(gnmiPath.Origin); err != nil { + return "", "", err + } + if _, err := builder.WriteRune(':'); err != nil { + return "", "", err + } } // Parse generic keys from prefix - for _, elem := range path.Elem { + for _, elem := range gnmiPath.Elem { if len(elem.Name) > 0 { - builder.WriteRune('/') - builder.WriteString(elem.Name) + if _, err := builder.WriteRune('/'); err != nil { + return "", "", err + } + if _, err := builder.WriteString(elem.Name); err != nil { + return "", "", err + } } name := builder.String() - if _, exists := c.aliases[name]; exists { + if _, exists := c.internalAliases[name]; exists { aliasPath = name } @@ -400,30 +427,29 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } else { tags[key] = val } - } } } - return builder.String(), aliasPath + return builder.String(), aliasPath, nil } //ParsePath from XPath-like string to gNMI path structure -func parsePath(origin string, path string, target string) (*gnmi.Path, error) { +func parsePath(origin string, pathToParse string, target string) (*gnmiLib.Path, error) { var err error - gnmiPath := gnmi.Path{Origin: origin, Target: target} + gnmiPath := gnmiLib.Path{Origin: origin, Target: target} - if len(path) > 0 && path[0] != '/' { - return nil, fmt.Errorf("path does not start with a '/': %s", path) + if len(pathToParse) > 0 && pathToParse[0] != '/' { + return nil, fmt.Errorf("path does not start with a '/': %s", pathToParse) } - elem := &gnmi.PathElem{} + elem := &gnmiLib.PathElem{} start, name, value, end := 0, -1, -1, -1 - path = path + "/" + pathToParse = pathToParse + "/" - for i := 0; i < len(path); i++ { - if path[i] == '[' { + for i := 0; i < len(pathToParse); i++ { + if pathToParse[i] == '[' { if name >= 0 { break } @@ -432,37 +458,37 @@ func parsePath(origin string, path string, target string) (*gnmi.Path, error) { elem.Key = make(map[string]string) } name = i + 1 - } else if 
path[i] == '=' { + } else if pathToParse[i] == '=' { if name <= 0 || value >= 0 { break } value = i + 1 - } else if path[i] == ']' { + } else if pathToParse[i] == ']' { if name <= 0 || value <= name { break } - elem.Key[path[name:value-1]] = strings.Trim(path[value:i], "'\"") + elem.Key[pathToParse[name:value-1]] = strings.Trim(pathToParse[value:i], "'\"") name, value = -1, -1 - } else if path[i] == '/' { + } else if pathToParse[i] == '/' { if name < 0 { if end < 0 { end = i } if end > start { - elem.Name = path[start:end] + elem.Name = pathToParse[start:end] gnmiPath.Elem = append(gnmiPath.Elem, elem) - gnmiPath.Element = append(gnmiPath.Element, path[start:i]) + gnmiPath.Element = append(gnmiPath.Element, pathToParse[start:i]) } start, name, value, end = i+1, -1, -1, -1 - elem = &gnmi.PathElem{} + elem = &gnmiLib.PathElem{} } } } if name >= 0 || value >= 0 { - err = fmt.Errorf("Invalid gNMI path: %s", path) + err = fmt.Errorf("Invalid gNMI path: %s", pathToParse) } if err != nil { @@ -486,7 +512,7 @@ const sampleConfig = ` username = "cisco" password = "cisco" - ## gNMI encoding requested (one of: "proto", "json", "json_ietf") + ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") # encoding = "proto" ## redial in case of failures after @@ -553,7 +579,7 @@ func (c *GNMI) Gather(_ telegraf.Accumulator) error { func New() telegraf.Input { return &GNMI{ Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Second}, + Redial: config.Duration(10 * time.Second), } } diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index c74fbcd4a5164..17a955c4875dc 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -9,54 +9,54 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" - "github.com/openconfig/gnmi/proto/gnmi" - "github.com/stretchr/testify/assert" + gnmiLib "github.com/openconfig/gnmi/proto/gnmi" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) func TestParsePath(t *testing.T) { path := "/foo/bar/bla[shoo=woo][shoop=/woop/]/z" parsed, err := parsePath("theorigin", path, "thetarget") - assert.NoError(t, err) - assert.Equal(t, parsed.Origin, "theorigin") - assert.Equal(t, parsed.Target, "thetarget") - assert.Equal(t, parsed.Element, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}) - assert.Equal(t, parsed.Elem, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"}, - {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}) + require.NoError(t, err) + require.Equal(t, "theorigin", parsed.Origin) + require.Equal(t, "thetarget", parsed.Target) + require.Equal(t, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}, parsed.Element) + require.Equal(t, []*gnmiLib.PathElem{{Name: "foo"}, {Name: "bar"}, + {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}, parsed.Elem) parsed, err = parsePath("", "", "") - assert.NoError(t, err) - assert.Equal(t, *parsed, gnmi.Path{}) + require.NoError(t, err) + require.Equal(t, gnmiLib.Path{}, *parsed) parsed, err = parsePath("", "/foo[[", "") - assert.Nil(t, parsed) - assert.Equal(t, errors.New("Invalid gNMI path: /foo[[/"), err) + require.Nil(t, parsed) + require.Equal(t, errors.New("Invalid gNMI path: /foo[[/"), err) } type 
MockServer struct { - SubscribeF func(gnmi.GNMI_SubscribeServer) error + SubscribeF func(gnmiLib.GNMI_SubscribeServer) error GRPCServer *grpc.Server } -func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { +func (s *MockServer) Capabilities(context.Context, *gnmiLib.CapabilityRequest) (*gnmiLib.CapabilityResponse, error) { return nil, nil } -func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { +func (s *MockServer) Get(context.Context, *gnmiLib.GetRequest) (*gnmiLib.GetResponse, error) { return nil, nil } -func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { +func (s *MockServer) Set(context.Context, *gnmiLib.SetRequest) (*gnmiLib.SetResponse, error) { return nil, nil } -func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { +func (s *MockServer) Subscribe(server gnmiLib.GNMI_SubscribeServer) error { return s.SubscribeF(server) } @@ -66,18 +66,18 @@ func TestWaitError(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { return fmt.Errorf("testerror") }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), } var acc testutil.Accumulator @@ -107,7 +107,7 @@ func TestUsernamePassword(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { metadata, ok := metadata.FromIncomingContext(server.Context()) if !ok { return errors.New("failed to get metadata") @@ -127,7 +127,7 @@ func TestUsernamePassword(t *testing.T) { }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, @@ -135,7 +135,7 @@ func TestUsernamePassword(t *testing.T) { Username: "theusername", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), } var acc testutil.Accumulator @@ -159,12 +159,12 @@ func TestUsernamePassword(t *testing.T) { errors.New("aborted gNMI subscription: rpc error: code = Unknown desc = success")) } -func mockGNMINotification() *gnmi.Notification { - return &gnmi.Notification{ +func mockGNMINotification() *gnmiLib.Notification { + return &gnmiLib.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmi.Path{ + Prefix: &gnmiLib.Path{ Origin: "type", - Elem: []*gnmi.PathElem{ + Elem: []*gnmiLib.PathElem{ { Name: "model", Key: map[string]string{"foo": "bar"}, @@ -172,35 +172,35 @@ func mockGNMINotification() *gnmi.Notification { }, Target: "subscription", }, - Update: []*gnmi.Update{ + Update: []*gnmiLib.Update{ { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "some"}, { Name: "path", Key: map[string]string{"name": "str", "uint64": "1234"}}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_IntVal{IntVal: 5678}}, }, { - Path: &gnmi.Path{ - Elem: 
[]*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "other"}, {Name: "path"}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "foobar"}}, }, { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "other"}, {Name: "this"}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "that"}}, }, }, } @@ -218,7 +218,7 @@ func TestNotification(t *testing.T) { plugin: &GNMI{ Log: testutil.Logger{}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), Subscriptions: []Subscription{ { Name: "alias", @@ -229,15 +229,20 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) + if err != nil { + return err + } + err = server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}) + if err != nil { + return err + } notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, }, expected: []telegraf.Metric{ @@ -302,7 +307,7 @@ func TestNotification(t *testing.T) { plugin: &GNMI{ Log: testutil.Logger{}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), Subscriptions: []Subscription{ { Name: "PHY_COUNTERS", @@ -313,14 +318,14 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { - response := &gnmi.SubscribeResponse{ - Response: &gnmi.SubscribeResponse_Update{ - Update: &gnmi.Notification{ + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + response := &gnmiLib.SubscribeResponse{ + Response: &gnmiLib.SubscribeResponse_Update{ + Update: &gnmiLib.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmi.Path{ + Prefix: &gnmiLib.Path{ Origin: "type", - Elem: []*gnmi.PathElem{ + Elem: []*gnmiLib.PathElem{ { Name: "state", }, @@ -337,19 +342,18 @@ func TestNotification(t *testing.T) { }, Target: "subscription", }, - Update: []*gnmi.Update{ + Update: []*gnmiLib.Update{ { - Path: &gnmi.Path{}, - Val: &gnmi.TypedValue{ - Value: &gnmi.TypedValue_IntVal{IntVal: 42}, + Path: &gnmiLib.Path{}, + Val: &gnmiLib.TypedValue{ + Value: &gnmiLib.TypedValue_IntVal{IntVal: 42}, }, }, }, }, }, } - server.Send(response) - return nil + return 
server.Send(response) }, }, expected: []telegraf.Metric{ @@ -378,7 +382,7 @@ func TestNotification(t *testing.T) { grpcServer := grpc.NewServer() tt.server.GRPCServer = grpcServer - gnmi.RegisterGNMIServer(grpcServer, tt.server) + gnmiLib.RegisterGNMIServer(grpcServer, tt.server) var acc testutil.Accumulator err = tt.plugin.Start(&acc) @@ -403,6 +407,29 @@ func TestNotification(t *testing.T) { } } +type MockLogger struct { + telegraf.Logger + lastFormat string + lastArgs []interface{} +} + +func (l *MockLogger) Errorf(format string, args ...interface{}) { + l.lastFormat = format + l.lastArgs = args +} + +func TestSubscribeResponseError(t *testing.T) { + me := "mock error message" + var mc uint32 = 7 + ml := &MockLogger{} + plugin := &GNMI{Log: ml} + // TODO: FIX SA1019: gnmi.Error is deprecated: Do not use. + errorResponse := &gnmiLib.SubscribeResponse_Error{Error: &gnmiLib.Error{Message: me, Code: mc}} + plugin.handleSubscribeResponse("127.0.0.1:0", &gnmiLib.SubscribeResponse{Response: errorResponse}) + require.NotEmpty(t, ml.lastFormat) + require.Equal(t, []interface{}{mc, me}, ml.lastArgs) +} + func TestRedial(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -411,19 +438,18 @@ func TestRedial(t *testing.T) { Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Millisecond}, + Redial: config.Duration(10 * time.Millisecond), } grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) var wg sync.WaitGroup wg.Add(1) @@ -447,17 +473,16 @@ func TestRedial(t *testing.T) { grpcServer = grpc.NewServer() gnmiServer = &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false}} + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) wg.Add(1) go func() { diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index be5f8fc60aaa4..d522f5a49dfea 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -5,7 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -19,7 +19,6 @@ import ( ) type ResponseMetrics struct { - total int Metrics []Metric `json:"metrics"` } @@ -129,12 +128,12 @@ func (h *GrayLog) Gather(acc 
telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } h.client.SetHTTPClient(client) } @@ -178,16 +177,16 @@ func (h *GrayLog) gatherServer( if err := json.Unmarshal([]byte(resp), &dat); err != nil { return err } - for _, m_item := range dat.Metrics { + for _, mItem := range dat.Metrics { fields := make(map[string]interface{}) tags := map[string]string{ "server": host, "port": port, - "name": m_item.Name, - "type": m_item.Type, + "name": mItem.Name, + "type": mItem.Type, } - h.flatten(m_item.Fields, fields, "") - acc.AddFields(m_item.FullName, fields, tags) + h.flatten(mItem.Fields, fields, "") + acc.AddFields(mItem.FullName, fields, tags) } return nil } @@ -204,13 +203,13 @@ func (h *GrayLog) flatten(item map[string]interface{}, fields map[string]interfa id = id + "_" } for k, i := range item { - switch i.(type) { + switch i := i.(type) { case int: - fields[id+k] = i.(float64) + fields[id+k] = float64(i) case float64: - fields[id+k] = i.(float64) + fields[id+k] = i case map[string]interface{}: - h.flatten(i.(map[string]interface{}), fields, id+k) + h.flatten(i, fields, id+k) default: } } @@ -234,19 +233,19 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("invalid server URL \"%s\"", serverURL) } // Add X-Requested-By header headers["X-Requested-By"] = "Telegraf" if strings.Contains(requestURL.String(), "multiple") { m := &Messagebody{Metrics: h.Metrics} - http_body, err := json.Marshal(m) + httpBody, err := json.Marshal(m) if err != nil { - return "", -1, fmt.Errorf("Invalid list of Metrics %s", h.Metrics) + return "", -1, fmt.Errorf("invalid list of Metrics %s", h.Metrics) } method = "POST" - content = bytes.NewBuffer(http_body) + content = bytes.NewBuffer(httpBody) } req, err := http.NewRequest(method, requestURL.String(), content) if err != nil { @@ -265,14 +264,14 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestURL.String(), resp.StatusCode, http.StatusText(resp.StatusCode), diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index f8008f1d94c66..5739969e3df01 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -1,7 +1,7 @@ package graylog import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -115,7 +115,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 
9ec9512ea170c..f95dbcc9f1045 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -61,25 +61,24 @@ var sampleConfig = ` # insecure_skip_verify = false ` -func (r *haproxy) SampleConfig() string { +func (h *haproxy) SampleConfig() string { return sampleConfig } -func (r *haproxy) Description() string { +func (h *haproxy) Description() string { return "Read metrics of haproxy, via socket or csv stats page" } // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *haproxy) Gather(acc telegraf.Accumulator) error { - if len(g.Servers) == 0 { - return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) +func (h *haproxy) Gather(acc telegraf.Accumulator) error { + if len(h.Servers) == 0 { + return h.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) } - endpoints := make([]string, 0, len(g.Servers)) - - for _, endpoint := range g.Servers { + endpoints := make([]string, 0, len(h.Servers)) + for _, endpoint := range h.Servers { if strings.HasPrefix(endpoint, "http") { endpoints = append(endpoints, endpoint) continue @@ -96,9 +95,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { if len(matches) == 0 { endpoints = append(endpoints, socketPath) } else { - for _, match := range matches { - endpoints = append(endpoints, match) - } + endpoints = append(endpoints, matches...) } } @@ -107,7 +104,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { for _, server := range endpoints { go func(serv string) { defer wg.Done() - if err := g.gatherServer(serv, acc); err != nil { + if err := h.gatherServer(serv, acc); err != nil { acc.AddError(err) } }(server) @@ -117,43 +114,43 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { return nil } -func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { +func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { socketPath := getSocketAddr(addr) c, err := net.Dial("unix", socketPath) if err != nil { - return fmt.Errorf("Could not connect to socket '%s': %s", addr, err) + return fmt.Errorf("could not connect to socket '%s': %s", addr, err) } _, errw := c.Write([]byte("show stat\n")) if errw != nil { - return fmt.Errorf("Could not write to socket '%s': %s", addr, errw) + return fmt.Errorf("could not write to socket '%s': %s", addr, errw) } - return g.importCsvResult(c, acc, socketPath) + return h.importCsvResult(c, acc, socketPath) } -func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { +func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if !strings.HasPrefix(addr, "http") { - return g.gatherServerSocket(addr, acc) + return h.gatherServerSocket(addr, acc) } - if g.client == nil { - tlsCfg, err := g.ClientConfig.TLSConfig() + if h.client == nil { + tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } - g.client = client + h.client = client } if !strings.HasSuffix(addr, ";csv") { @@ -176,11 +173,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { addr = u.String() } - if g.Username != "" || g.Password != "" { - req.SetBasicAuth(g.Username, g.Password) + if h.Username != "" || h.Password != "" { + 
req.SetBasicAuth(h.Username, h.Password) } - res, err := g.client.Do(req) + res, err := h.client.Do(req) if err != nil { return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err) } @@ -190,7 +187,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) } - if err := g.importCsvResult(res.Body, acc, u.Host); err != nil { + if err := h.importCsvResult(res.Body, acc, u.Host); err != nil { return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err) } @@ -202,9 +199,8 @@ func getSocketAddr(sock string) string { if len(socketAddr) >= 2 { return socketAddr[1] - } else { - return socketAddr[0] } + return socketAddr[0] } var typeNames = []string{"frontend", "backend", "server", "listener"} @@ -223,7 +219,7 @@ var fieldRenames = map[string]string{ "hrsp_other": "http_response.other", } -func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { +func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { csvr := csv.NewReader(r) now := time.Now() @@ -260,7 +256,7 @@ func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st colName := headers[i] fieldName := colName - if !g.KeepFieldNames { + if !h.KeepFieldNames { if fieldRename, ok := fieldRenames[colName]; ok { fieldName = fieldRename } diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index e05031f192675..21a1b09c10d02 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -7,12 +7,14 @@ import ( "net" "net/http" "net/http/httptest" + "os" + "path/filepath" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -25,13 +27,15 @@ func (s statServer) serverSocket(l net.Listener) { } go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) n, _ := c.Read(buf) data := buf[:n] if string(data) == "show stat\n" { + //nolint:errcheck,revive // we return anyway c.Write([]byte(csvOutputSample)) - c.Close() } }(conn) } @@ -43,15 +47,18 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { username, password, ok := r.BasicAuth() if !ok { w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "Unauthorized") + _, err := fmt.Fprint(w, "Unauthorized") + require.NoError(t, err) return } if username == "user" && password == "password" { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "Unauthorized") + _, err := fmt.Fprint(w, "Unauthorized") + require.NoError(t, err) } })) defer ts.Close() @@ -81,13 +88,14 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { Servers: []string{ts.URL}, } - r.Gather(&acc) + require.NoError(t, r.Gather(&acc)) require.NotEmpty(t, acc.Errors) } func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) })) defer ts.Close() @@ -97,8 +105,7 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, 
err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "server": ts.Listener.Addr().String(), @@ -114,12 +121,13 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { var randomNumber int64 var sockets [5]net.Listener - _globmask := "/tmp/test-haproxy*.sock" - _badmask := "/tmp/test-fail-haproxy*.sock" + + _globmask := filepath.Join(os.TempDir(), "test-haproxy*.sock") + _badmask := filepath.Join(os.TempDir(), "test-fail-haproxy*.sock") for i := 0; i < 5; i++ { - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) - sockname := fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) + sockname := filepath.Join(os.TempDir(), fmt.Sprintf("test-haproxy%d.sock", randomNumber)) sock, err := net.Listen("unix", sockname) if err != nil { @@ -127,7 +135,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { } sockets[i] = sock - defer sock.Close() + defer sock.Close() //nolint:revive // done on purpose, closing will be executed properly s := statServer{} go s.serverSocket(sock) @@ -146,7 +154,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { for _, sock := range sockets { tags := map[string]string{ - "server": sock.Addr().String(), + "server": getSocketAddr(sock.Addr().String()), "proxy": "git", "sv": "www", "type": "server", @@ -158,7 +166,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { // This mask should not match any socket r.Servers = []string{_badmask} - r.Gather(&acc) + require.NoError(t, r.Gather(&acc)) require.NotEmpty(t, acc.Errors) } @@ -171,12 +179,13 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") + require.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") } func TestHaproxyKeepFieldNames(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) })) defer ts.Close() @@ -187,8 +196,7 @@ func TestHaproxyKeepFieldNames(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "server": ts.Listener.Addr().String(), diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go index a3fda2abd2013..41d513e4011e3 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go @@ -2,8 +2,9 @@ package hddtemp import ( "net" - "reflect" "testing" + + "github.com/stretchr/testify/require" ) func TestFetch(t *testing.T) { @@ -11,10 +12,7 @@ func TestFetch(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -24,18 +22,12 @@ func TestFetch(t *testing.T) { Unit: "C", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func TestFetchWrongAddress(t *testing.T) { _, err := New().Fetch("127.0.0.1:1") - - if err == nil { - t.Error("expecting err to be non-nil") - } + require.Error(t, err) } func TestFetchStatus(t *testing.T) { @@ 
-43,10 +35,7 @@ func TestFetchStatus(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -57,10 +46,7 @@ func TestFetchStatus(t *testing.T) { Status: "SLP", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func TestFetchTwoDisks(t *testing.T) { @@ -68,10 +54,7 @@ func TestFetchTwoDisks(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -88,28 +71,20 @@ func TestFetchTwoDisks(t *testing.T) { Status: "SLP", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func serve(t *testing.T, data []byte) net.Listener { l, err := net.Listen("tcp", "127.0.0.1:0") - - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) go func(t *testing.T) { conn, err := l.Accept() + require.NoError(t, err) - if err != nil { - t.Fatal(err) - } - - conn.Write(data) - conn.Close() + _, err = conn.Write(data) + require.NoError(t, err) + require.NoError(t, conn.Close()) }(t) return l diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go index 0f084ac219bff..2e6d3a53c00cd 100644 --- a/plugins/inputs/hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/hddtemp.go @@ -20,7 +20,7 @@ type Fetcher interface { Fetch(address string) ([]gohddtemp.Disk, error) } -func (_ *HDDTemp) Description() string { +func (h *HDDTemp) Description() string { return "Monitor disks' temperatures using hddtemp" } @@ -36,7 +36,7 @@ var hddtempSampleConfig = ` # devices = ["sda", "*"] ` -func (_ *HDDTemp) SampleConfig() string { +func (h *HDDTemp) SampleConfig() string { return hddtempSampleConfig } diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index f299c2ac66c4b..769022049d17a 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -3,16 +3,17 @@ package hddtemp import ( "testing" - hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" + "github.com/influxdata/telegraf/testutil" ) type mockFetcher struct { } -func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { +func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ { DeviceName: "Disk1", @@ -27,21 +28,20 @@ func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { Unit: "C", }, }, nil - } func newMockFetcher() *mockFetcher { return &mockFetcher{} } func TestFetch(t *testing.T) { - hddtemp := &HDDTemp{ + hddTemp := &HDDTemp{ fetcher: newMockFetcher(), Address: "localhost", Devices: []string{"*"}, } acc := &testutil.Accumulator{} - err := hddtemp.Gather(acc) + err := hddTemp.Gather(acc) require.NoError(t, err) assert.Equal(t, acc.NFields(), 2) @@ -79,5 +79,4 @@ func TestFetch(t *testing.T) { for _, test := range tests { acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags) } - } diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 
59abd82562672..95591b9f0ad22 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -34,6 +34,15 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The # username = "username" # password = "pa$$word" + ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + + ## HTTP Proxy support + # http_proxy_url = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -41,6 +50,15 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Amount of time allowed to complete the HTTP request # timeout = "5s" @@ -64,3 +82,7 @@ The default values below are added if the input format does not specify a value: - http - tags: - url + +### Optional Cookie Authentication Settings: + +The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c247d40076620..d7a6ac1213b6f 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -1,17 +1,17 @@ package http import ( + "context" "fmt" "io" - "io/ioutil" "net/http" + "os" "strings" "sync" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -27,16 +27,15 @@ type HTTP struct { // HTTP Basic Auth Credentials Username string `toml:"username"` Password string `toml:"password"` - tls.ClientConfig // Absolute path to file with Bearer token BearerToken string `toml:"bearer_token"` SuccessStatusCodes []int `toml:"success_status_codes"` - Timeout internal.Duration `toml:"timeout"` - client *http.Client + httpconfig.HTTPClientConfig + Log telegraf.Logger `toml:"-"` // The parser will automatically be set by Telegraf core code because // this plugin implements the ParserInput interface (i.e. the SetParser method) @@ -70,6 +69,15 @@ var sampleConfig = ` ## compress body or "identity" to apply no encoding. 
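
The OAuth2 options documented above (`client_id`, `client_secret`, `token_url`, `scopes`) follow the standard client-credentials grant. For reference, a minimal sketch of that flow using `golang.org/x/oauth2` directly rather than Telegraf's HTTP client wrapper; the URLs and credentials are placeholders, not values from this patch:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	cfg := clientcredentials.Config{
		ClientID:     "clientid",
		ClientSecret: "secret",
		TokenURL:     "https://identityprovider.example/oauth2/v1/token", // placeholder
		Scopes:       []string{"urn:opc:idm:__myscopes__"},
	}

	// Client returns an *http.Client that fetches a token on first use,
	// refreshes it as needed, and attaches "Authorization: Bearer <token>"
	// to every outgoing request.
	client := cfg.Client(context.Background())

	resp, err := client.Get("https://example.com/metrics") // placeholder endpoint
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```
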
# content_encoding = "identity" + ## HTTP Proxy support + # http_proxy_url = "" + + ## OAuth2 Client Credentials Grant + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -77,6 +85,15 @@ var sampleConfig = ` ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Amount of time allowed to complete the HTTP request # timeout = "5s" @@ -101,18 +118,13 @@ func (*HTTP) Description() string { } func (h *HTTP) Init() error { - tlsCfg, err := h.ClientConfig.TLSConfig() + ctx := context.Background() + client, err := h.HTTPClientConfig.CreateClient(ctx, h.Log) if err != nil { return err } - h.client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, - } + h.client = client // Set default as [200] if len(h.SuccessStatusCodes) == 0 { @@ -168,7 +180,7 @@ func (h *HTTP) gatherURL( } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return err } @@ -213,7 +225,7 @@ func (h *HTTP) gatherURL( h.SuccessStatusCodes) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -242,14 +254,13 @@ func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) } return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func init() { inputs.Add("http", func() telegraf.Input { return &HTTP{ - Timeout: internal.Duration{Duration: time.Second * 5}, - Method: "GET", + Method: "GET", } }) } diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 993eda7321c0f..c485167205708 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -3,18 +3,22 @@ package http_test import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "net/url" "testing" - plugin "github.com/influxdata/telegraf/plugins/inputs/http" + "github.com/stretchr/testify/require" + + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + "github.com/influxdata/telegraf/plugins/common/oauth" + httpplugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) -func TestHTTPwithJSONFormat(t *testing.T) { +func TestHTTPWithJSONFormat(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { _, _ = w.Write([]byte(simpleJSON)) @@ -24,9 +28,9 @@ func TestHTTPwithJSONFormat(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, } metricName := "metricName" @@ -37,7 +41,7 @@ func 
TestHTTPwithJSONFormat(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) require.Len(t, acc.Metrics, 1) @@ -47,7 +51,7 @@ func TestHTTPwithJSONFormat(t *testing.T) { require.Equal(t, metric.Measurement, metricName) require.Len(t, acc.Metrics[0].Fields, 1) require.Equal(t, acc.Metrics[0].Fields["a"], 1.2) - require.Equal(t, acc.Metrics[0].Tags["url"], url) + require.Equal(t, acc.Metrics[0].Tags["url"], address) } func TestHTTPHeaders(t *testing.T) { @@ -66,9 +70,9 @@ func TestHTTPHeaders(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, Headers: map[string]string{header: headerValue}, } @@ -79,7 +83,7 @@ func TestHTTPHeaders(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -89,9 +93,9 @@ func TestInvalidStatusCode(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, } metricName := "metricName" @@ -102,7 +106,7 @@ func TestInvalidStatusCode(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.Error(t, acc.GatherError(plugin.Gather)) } @@ -112,9 +116,9 @@ func TestSuccessStatusCodes(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, SuccessStatusCodes: []int{200, 202}, } @@ -126,7 +130,7 @@ func TestSuccessStatusCodes(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -140,7 +144,7 @@ func TestMethod(t *testing.T) { })) defer fakeServer.Close() - plugin := &plugin.HTTP{ + plugin := &httpplugin.HTTP{ URLs: []string{fakeServer.URL}, Method: "POST", } @@ -152,7 +156,7 @@ func TestMethod(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -166,21 +170,21 @@ func TestBodyAndContentEncoding(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() - url := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + address := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) tests := []struct { name string - plugin *plugin.HTTP + plugin *httpplugin.HTTP queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) }{ { name: "no body", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ Method: "POST", - URLs: []string{url}, + URLs: []string{address}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte(""), body) w.WriteHeader(http.StatusOK) @@ -188,13 +192,13 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "post body", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, 
Method: "POST", Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -202,13 +206,13 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "get method body is sent", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -216,8 +220,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "gzip encoding", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", ContentEncoding: "gzip", @@ -227,7 +231,7 @@ func TestBodyAndContentEncoding(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -246,7 +250,86 @@ func TestBodyAndContentEncoding(t *testing.T) { tt.plugin.SetParser(parser) var acc testutil.Accumulator - tt.plugin.Init() + require.NoError(t, tt.plugin.Init()) + err = tt.plugin.Gather(&acc) + require.NoError(t, err) + }) + } +} + +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *httpplugin.HTTP + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &httpplugin.HTTP{ + URLs: []string{u.String()}, + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &httpplugin.HTTP{ + URLs: []string{u.String() + "/write"}, + HTTPClientConfig: httpconfig.HTTPClientConfig{ + OAuth2Config: oauth.OAuth2Config{ + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + }, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + _, err := w.Write([]byte(values.Encode())) + require.NoError(t, err) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + + parser, _ := parsers.NewValueParser("metric", "string", "", nil) + tt.plugin.SetParser(parser) + err = tt.plugin.Init() + require.NoError(t, err) + + var acc 
testutil.Accumulator err = tt.plugin.Gather(&acc) require.NoError(t, err) }) diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 05e48058667ef..a87ec3f833890 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,11 +1,13 @@ # HTTP Listener v2 Input Plugin HTTP Listener v2 is a service input plugin that listens for metrics sent via -HTTP. Metrics may be sent in any supported [data format][data_format]. +HTTP. Metrics may be sent in any supported [data format][data_format]. For metrics in +[InfluxDB Line Protocol][line_protocol] it's recommended to use the [`influxdb_listener`][influxdb_listener] +or [`influxdb_v2_listener`][influxdb_v2_listener] instead. **Note:** The plugin previously known as `http_listener` has been renamed `influxdb_listener`. If you would like Telegraf to act as a proxy/relay for -InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener]. +InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener] or [`influxdb_v2_listener`][influxdb_v2_listener]. ### Configuration: @@ -17,7 +19,14 @@ This is a sample configuration for the plugin. service_address = ":8080" ## Path to listen to. - # path = "/telegraf" + ## This option is deprecated and only available for backward-compatibility. Please use paths instead. + # path = "" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## Save path as http_listener_v2_path tag if set to true + # path_tag = false ## HTTP methods to accept. # methods = ["POST", "PUT"] @@ -83,3 +92,5 @@ curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' [data_format]: /docs/DATA_FORMATS_INPUT.md [influxdb_listener]: /plugins/inputs/influxdb_listener/README.md +[line_protocol]: https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/ +[influxdb_v2_listener]: /plugins/inputs/influxdb_v2_listener/README.md diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 1023c0d10bcf5..d2a2e5f35214e 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -4,7 +4,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -12,8 +12,10 @@ import ( "sync" "time" + "github.com/golang/snappy" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -25,8 +27,9 @@ import ( const defaultMaxBodySize = 500 * 1024 * 1024 const ( - body = "body" - query = "query" + body = "body" + query = "query" + pathTag = "http_listener_v2_path" ) // TimeFunc provides a timestamp for the metrics @@ -36,11 +39,13 @@ type TimeFunc func() time.Time type HTTPListenerV2 struct { ServiceAddress string `toml:"service_address"` Path string `toml:"path"` + Paths []string `toml:"paths"` + PathTag bool `toml:"path_tag"` Methods []string `toml:"methods"` DataSource string `toml:"data_source"` - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize 
config.Size `toml:"max_body_size"` Port int `toml:"port"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` @@ -63,7 +68,14 @@ const sampleConfig = ` service_address = ":8080" ## Path to listen to. - # path = "/telegraf" + ## This option is deprecated and only available for backward-compatibility. Please use paths instead. + # path = "" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## Save path as http_listener_v2_path tag if set to true + # path_tag = false ## HTTP methods to accept. # methods = ["POST", "PUT"] @@ -74,7 +86,7 @@ const sampleConfig = ` # write_timeout = "10s" ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # max_body_size = "500MB" ## Part of the request to consume. Available options are "body" and @@ -124,15 +136,20 @@ func (h *HTTPListenerV2) SetParser(parser parsers.Parser) { // Start starts the http listener service. func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 + if h.ReadTimeout < config.Duration(time.Second) { + h.ReadTimeout = config.Duration(time.Second * 10) + } + if h.WriteTimeout < config.Duration(time.Second) { + h.WriteTimeout = config.Duration(time.Second * 10) } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 + + // Append h.Path to h.Paths + if h.Path != "" && !choice.Contains(h.Path, h.Paths) { + h.Paths = append(h.Paths, h.Path) } h.acc = acc @@ -145,8 +162,8 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { server := &http.Server{ Addr: h.ServiceAddress, Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: tlsConf, } @@ -165,7 +182,9 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { h.wg.Add(1) go func() { defer h.wg.Done() - server.Serve(h.listener) + if err := server.Serve(h.listener); err != nil { + h.Log.Errorf("Serve failed: %v", err) + } }() h.Log.Infof("Listening on %s", listener.Addr().String()) @@ -175,14 +194,18 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { // Stop cleans up all resources func (h *HTTPListenerV2) Stop() { - h.listener.Close() + if h.listener != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive + h.listener.Close() + } h.wg.Wait() } func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { handler := h.serveWrite - if req.URL.Path != h.Path { + if !choice.Contains(req.URL.Path, h.Paths) { handler = http.NotFound } @@ -191,8 +214,10 @@ func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { // Check that the content length is not too large for us to handle. 
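
A recurring change throughout this diff is the move from `internal.Duration`/`internal.Size` (struct wrappers) to `config.Duration`/`config.Size` (plain defined types over `time.Duration` and `int64`, as the conversions in the hunks above and below show). Values are therefore built and consumed with ordinary type conversions instead of a `.Duration`/`.Size` field access; a sketch of the pattern:

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf/config"
)

type Options struct {
	ReadTimeout config.Duration `toml:"read_timeout"`
	MaxBodySize config.Size     `toml:"max_body_size"`
}

func defaults() Options {
	return Options{
		// Before: internal.Duration{Duration: 10 * time.Second}
		ReadTimeout: config.Duration(10 * time.Second),
		// Before: internal.Size{Size: 500 * 1024 * 1024}
		MaxBodySize: config.Size(500 * 1024 * 1024),
	}
}

// Convert back to the standard types at the point of use.
func apply(o Options) (time.Duration, int64) {
	return time.Duration(o.ReadTimeout), int64(o.MaxBodySize)
}
```
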
- if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) + if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } @@ -205,7 +230,9 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } } if !isAcceptedMethod { - methodNotAllowed(res) + if err := methodNotAllowed(res); err != nil { + h.Log.Debugf("error in method-not-allowed: %v", err) + } return } @@ -226,7 +253,9 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) metrics, err := h.Parse(bytes) if err != nil { h.Log.Debugf("Parse error: %s", err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } @@ -238,6 +267,10 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } } + if h.PathTag { + m.AddTag(pathTag, req.URL.Path) + } + h.acc.AddMetric(m) } @@ -245,28 +278,60 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) ([]byte, bool) { - body := req.Body + encoding := req.Header.Get("Content-Encoding") - // Handle gzip request bodies - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(req.Body) + switch encoding { + case "gzip": + r, err := gzip.NewReader(req.Body) if err != nil { h.Log.Debug(err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } - defer body.Close() - } - - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) - bytes, err := ioutil.ReadAll(body) - if err != nil { - tooLarge(res) - return nil, false + defer r.Close() + maxReader := http.MaxBytesReader(res, r, int64(h.MaxBodySize)) + bytes, err := io.ReadAll(maxReader) + if err != nil { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } + return nil, false + } + return bytes, true + case "snappy": + defer req.Body.Close() + bytes, err := io.ReadAll(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return nil, false + } + // snappy block format is only supported by decode/encode not snappy reader/writer + bytes, err = snappy.Decode(nil, bytes) + if err != nil { + h.Log.Debug(err.Error()) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return nil, false + } + return bytes, true + default: + defer req.Body.Close() + bytes, err := io.ReadAll(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return nil, false + } + return bytes, true } - - return bytes, true } func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request) ([]byte, bool) { @@ -275,34 +340,34 @@ func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request query, err := url.QueryUnescape(rawQuery) if err != nil { h.Log.Debugf("Error parsing query: %s", err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } return []byte(query), true } -func tooLarge(res http.ResponseWriter) { +func tooLarge(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") 
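
The new snappy branch in `collectBody` uses snappy's *block* format (`snappy.Encode`/`snappy.Decode`) rather than the framed stream format (`snappy.NewReader`/`snappy.NewWriter`), as the in-code comment notes. A small round-trip sketch of the block format with `github.com/golang/snappy`:

```go
package example

import (
	"fmt"

	"github.com/golang/snappy"
)

func roundTrip() error {
	payload := []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n")

	// Block format: one self-contained compressed buffer, no stream framing.
	compressed := snappy.Encode(nil, payload)

	decoded, err := snappy.Decode(nil, compressed)
	if err != nil {
		return fmt.Errorf("snappy decode: %w", err)
	}
	fmt.Printf("%d -> %d bytes, payload: %s", len(payload), len(compressed), decoded)
	return nil
}
```
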
res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err } -func methodNotAllowed(res http.ResponseWriter) { +func methodNotAllowed(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusMethodNotAllowed) - res.Write([]byte(`{"error":"http: method not allowed"}`)) + _, err := res.Write([]byte(`{"error":"http: method not allowed"}`)) + return err } -func internalServerError(res http.ResponseWriter) { - res.Header().Set("Content-Type", "application/json") - res.WriteHeader(http.StatusInternalServerError) -} - -func badRequest(res http.ResponseWriter) { +func badRequest(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(`{"error":"http: bad request"}`)) + _, err := res.Write([]byte(`{"error":"http: bad request"}`)) + return err } func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { @@ -311,7 +376,6 @@ func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.Re if !ok || subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 { - http.Error(res, "Unauthorized.", http.StatusUnauthorized) return } @@ -326,7 +390,7 @@ func init() { return &HTTPListenerV2{ ServiceAddress: ":8080", TimeFunc: time.Now, - Path: "/telegraf", + Paths: []string{"/telegraf"}, Methods: []string{"POST", "PUT"}, DataSource: body, } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 4457fcacda79d..bf320d6f05174 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -4,19 +4,21 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/golang/snappy" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -52,7 +54,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { Methods: []string{"POST"}, Parser: parser, TimeFunc: time.Now, - MaxBodySize: internal.Size{Size: 70000}, + MaxBodySize: config.Size(70000), DataSource: "body", } return listener @@ -103,6 +105,27 @@ func createURL(listener *HTTPListenerV2, scheme string, path string, rawquery st return u.String() } +func TestInvalidListenerConfig(t *testing.T) { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, + ServiceAddress: "address_without_port", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + TimeFunc: time.Now, + MaxBodySize: config.Size(70000), + DataSource: "body", + } + + acc := &testutil.Accumulator{} + require.Error(t, listener.Start(acc)) + + // Stop is called when any ServiceInput fails to start; it must succeed regardless of state + listener.Stop() +} + func TestWriteHTTPSNoClientAuth(t *testing.T) { listener := newTestHTTPSListenerV2() listener.TLSAllowedCACerts = nil @@ -124,7 +147,7 @@ func TestWriteHTTPSNoClientAuth(t *testing.T) { // post single message to 
listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -138,7 +161,7 @@ func TestWriteHTTPSWithClientAuth(t *testing.T) { // post single message to listener resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -156,7 +179,7 @@ func TestWriteHTTPBasicAuth(t *testing.T) { req.SetBasicAuth(basicUsername, basicPassword) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -170,7 +193,7 @@ func TestWriteHTTP(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -182,7 +205,7 @@ func TestWriteHTTP(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -198,7 +221,7 @@ func TestWriteHTTP(t *testing.T) { // Post a gigantic metric to the listener and verify that an error is returned: resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) acc.Wait(3) @@ -208,6 +231,62 @@ func TestWriteHTTP(t *testing.T) { ) } +// http listener should add request path as configured path_tag +func TestWriteHTTPWithPathTag(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.PathTag = true + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/write"}, + ) +} + +// http listener should add request path as configured path_tag (trimming it before) +func TestWriteHTTPWithMultiplePaths(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.Paths = []string{"/alternative_write"} + listener.PathTag = true + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to /write + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + // post single message to /alternative_write + resp, err = http.Post(createURL(listener, "http", "/alternative_write", 
"db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/write"}, + ) + + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/alternative_write"}, + ) +} + // http listener should add a newline at the end of the buffer if it's not there func TestWriteHTTPNoNewline(t *testing.T) { listener := newTestHTTPListenerV2() @@ -219,7 +298,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -238,7 +317,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: internal.Size{Size: int64(len(hugeMetric))}, + MaxBodySize: config.Size(len(hugeMetric)), TimeFunc: time.Now, } @@ -248,7 +327,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -261,7 +340,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), TimeFunc: time.Now, } @@ -271,7 +350,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@ -283,7 +362,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) @@ -293,6 +372,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -306,6 +386,41 @@ func TestWriteHTTPGzippedData(t *testing.T) { } } +// test that writing snappy data works +func TestWriteHTTPSnappyData(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + testData := "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + encodedData := snappy.Encode(nil, []byte(testData)) + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(encodedData)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "snappy") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Log("Test client request 
failed. Error: ", err)
+	}
+	require.NoError(t, err)
+	require.NoError(t, resp.Body.Close())
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	hostTags := []string{"server01"}
+	acc.Wait(1)
+
+	for _, hostTag := range hostTags {
+		acc.AssertContainsTaggedFields(t, "cpu_load_short",
+			map[string]interface{}{"value": float64(12)},
+			map[string]string{"host": hostTag},
+		)
+	}
+}
+
 // writes 25,000 metrics to the listener with 10 different writers
 func TestWriteHTTPHighTraffic(t *testing.T) {
 	if runtime.GOOS == "darwin" {
@@ -325,15 +440,21 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
 			defer innerwg.Done()
 			for i := 0; i < 500; i++ {
 				resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
-				require.NoError(t, err)
-				resp.Body.Close()
-				require.EqualValues(t, 204, resp.StatusCode)
+				if err != nil {
+					return
+				}
+				if err := resp.Body.Close(); err != nil {
+					return
+				}
+				if resp.StatusCode != 204 {
+					return
+				}
 			}
 		}(&wg)
 	}
 	wg.Wait()

-	listener.Gather(acc)
+	require.NoError(t, listener.Gather(acc))
 	acc.Wait(25000)

 	require.Equal(t, int64(25000), int64(acc.NMetrics()))
@@ -349,7 +470,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
 	// post single message to listener
 	resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 404, resp.StatusCode)
 }
@@ -363,7 +484,7 @@ func TestWriteHTTPInvalid(t *testing.T) {
 	// post single message to listener
 	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 400, resp.StatusCode)
 }
@@ -377,7 +498,7 @@ func TestWriteHTTPEmpty(t *testing.T) {
 	// post single message to listener
 	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 204, resp.StatusCode)
 }
@@ -397,7 +518,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) {
 	resp, err := http.DefaultClient.Do(req)
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 204, resp.StatusCode)

 	acc.Wait(1)
@@ -409,7 +530,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) {
 	// post single message to listener
 	resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 204, resp.StatusCode)

 	acc.Wait(1)
@@ -435,7 +556,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) {
 	resp, err := http.DefaultClient.Do(req)
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 204, resp.StatusCode)

 	acc.Wait(2)
@@ -460,7 +581,7 @@ func TestWriteHTTPQueryParams(t *testing.T) {
 	resp, err := http.Post(createURL(listener, "http", "/write", "tagKey=tagValue&fieldKey=42"), "", bytes.NewBuffer([]byte(emptyMsg)))
 	require.NoError(t, err)
-	resp.Body.Close()
+	require.NoError(t, resp.Body.Close())
 	require.EqualValues(t, 204, resp.StatusCode)

 	acc.Wait(1)
@@ -484,7 +605,7 @@ func TestWriteHTTPFormData(t *testing.T) {
 		"fieldKey": {"42"},
 	})
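For readers unfamiliar with the block-format snappy compression exercised by `TestWriteHTTPSnappyData` above, here is a minimal round-trip sketch using the same `github.com/golang/snappy` package the test imports; the payload is the test's sample line, and everything else is illustrative:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	line := []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n")

	// Encode and Decode work on whole blocks; passing nil as the
	// destination lets the library allocate a buffer of the right size.
	// This block format is what the test advertises with the
	// "Content-Encoding: snappy" header.
	encoded := snappy.Encode(nil, line)

	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d compressed -> %q\n", len(line), len(encoded), decoded)
}
```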
require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 67d0dc067f691..81b512e80743f 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -51,12 +51,20 @@ This input plugin checks HTTP/HTTPS connections. # response_string_match = "ok" # response_string_match = "\".*_status\".?:.?\"up\"" + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. + # response_status_code = 0 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Use the given name as the SNI server name on each URL + # tls_server_name = "" ## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] @@ -83,8 +91,9 @@ This input plugin checks HTTP/HTTPS connections. - response_time (float, seconds) - content_length (int, response body length) - response_string_match (int, 0 = mismatch / body read error, 1 = match) + - response_status_code_match (int, 0 = mismatch, 1 = match) - http_response_code (int, response status code) - - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) + - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) #### `result` / `result_code` @@ -93,14 +102,15 @@ Upon finishing polling the target server, the plugin registers the result of the This tag is used to expose network and plugin errors. HTTP errors are considered a successful connection. -|Tag value |Corresponding field value|Description| ---------------------------|-------------------------|-----------| -|success | 0 |The HTTP request completed, even if the HTTP code represents an error| -|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| -|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error. Or the option `response_body_field` was used and the content of the response body was not a valid utf-8. Or the size of the body of the response exceeded the `response_body_max_size` | -|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| -|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| -|dns_error | 5 |There was a DNS error while attempting to connect to the host| +|Tag value |Corresponding field value|Description| +-------------------------------|-------------------------|-----------| +|success | 0 |The HTTP request completed, even if the HTTP code represents an error| +|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. 
HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| +|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error. Or the option `response_body_field` was used and the content of the response body was not a valid utf-8. Or the size of the body of the response exceeded the `response_body_max_size` | +|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| +|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| +|dns_error | 5 |There was a DNS error while attempting to connect to the host| +|response_status_code_mismatch | 6 |The option `response_status_code_match` was used, and the status code of the response didn't match the value.| ### Example Output: diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 33888503b068f..799f664d1e7b0 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" + "os" "regexp" "strconv" "strings" @@ -15,7 +15,7 @@ import ( "unicode/utf8" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -33,15 +33,16 @@ type HTTPResponse struct { HTTPProxy string `toml:"http_proxy"` Body string Method string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration HTTPHeaderTags map[string]string `toml:"http_header_tags"` Headers map[string]string FollowRedirects bool // Absolute path to file with Bearer token - BearerToken string `toml:"bearer_token"` - ResponseBodyField string `toml:"response_body_field"` - ResponseBodyMaxSize internal.Size `toml:"response_body_max_size"` + BearerToken string `toml:"bearer_token"` + ResponseBodyField string `toml:"response_body_field"` + ResponseBodyMaxSize config.Size `toml:"response_body_max_size"` ResponseStringMatch string + ResponseStatusCode int Interface string // HTTP Basic Auth Credentials Username string `toml:"username"` @@ -51,7 +52,11 @@ type HTTPResponse struct { Log telegraf.Logger compiledStringMatch *regexp.Regexp - client *http.Client + client httpClient +} + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) } // Description returns the plugin Description @@ -92,8 +97,8 @@ var sampleConfig = ` # {'fake':'data'} # ''' - ## Optional name of the field that will contain the body of the response. - ## By default it is set to an empty String indicating that the body's content won't be added + ## Optional name of the field that will contain the body of the response. + ## By default it is set to an empty String indicating that the body's content won't be added # response_body_field = '' ## Maximum allowed HTTP response body size in bytes. @@ -106,6 +111,12 @@ var sampleConfig = ` # response_string_match = "ok" # response_string_match = "\".*_status\".?:.?\"up\"" + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. 
+ # response_status_code = 0 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -135,11 +146,11 @@ func (h *HTTPResponse) SampleConfig() string { var ErrRedirectAttempted = errors.New("redirect") // Set the proxy. A configured proxy overwrites the system wide proxy. -func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { - if http_proxy == "" { +func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) { + if httpProxy == "" { return http.ProxyFromEnvironment } - proxyURL, err := url.Parse(http_proxy) + proxyURL, err := url.Parse(httpProxy) if err != nil { return func(_ *http.Request) (*url.URL, error) { return nil, errors.New("bad proxy: " + err.Error()) @@ -150,9 +161,9 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { } } -// createHttpClient creates an http client which will timeout at the specified +// createHTTPClient creates an http client which will timeout at the specified // timeout period and can follow redirects if specified -func (h *HTTPResponse) createHttpClient() (*http.Client, error) { +func (h *HTTPResponse) createHTTPClient() (*http.Client, error) { tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -174,10 +185,10 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) { DisableKeepAlives: true, TLSClientConfig: tlsCfg, }, - Timeout: h.ResponseTimeout.Duration, + Timeout: time.Duration(h.ResponseTimeout), } - if h.FollowRedirects == false { + if !h.FollowRedirects { client.CheckRedirect = func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse } @@ -206,19 +217,20 @@ func localAddress(interfaceName string) (net.Addr, error) { return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) } -func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { - result_codes := map[string]int{ - "success": 0, - "response_string_mismatch": 1, - "body_read_error": 2, - "connection_failed": 3, - "timeout": 4, - "dns_error": 5, +func setResult(resultString string, fields map[string]interface{}, tags map[string]string) { + resultCodes := map[string]int{ + "success": 0, + "response_string_mismatch": 1, + "body_read_error": 2, + "connection_failed": 3, + "timeout": 4, + "dns_error": 5, + "response_status_code_mismatch": 6, } - tags["result"] = result_string - fields["result_type"] = result_string - fields["result_code"] = result_codes[result_string] + tags["result"] = resultString + fields["result_type"] = resultString + fields["result_code"] = resultCodes[resultString] } func setError(err error, fields map[string]interface{}, tags map[string]string) error { @@ -227,18 +239,18 @@ func setError(err error, fields map[string]interface{}, tags map[string]string) return timeoutError } - urlErr, isUrlErr := err.(*url.Error) - if !isUrlErr { + urlErr, isURLErr := err.(*url.Error) + if !isURLErr { return nil } opErr, isNetErr := (urlErr.Err).(*net.OpError) if isNetErr { switch e := (opErr.Err).(type) { - case (*net.DNSError): + case *net.DNSError: setResult("dns_error", fields, tags) return e - case (*net.ParseError): + case *net.ParseError: // Parse error has to do with parsing of IP addresses, so we // group it with address errors setResult("address_error", fields, tags) @@ -265,7 +277,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := 
os.ReadFile(h.BearerToken) if err != nil { return nil, nil, err } @@ -287,7 +299,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // Start Timer start := time.Now() resp, err := h.client.Do(request) - response_time := time.Since(start).Seconds() + responseTime := time.Since(start).Seconds() // If an error in returned, it means we are dealing with a network error, as // HTTP error codes do not generate errors in the net/http library @@ -296,20 +308,16 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] h.Log.Debugf("Network error while polling %s: %s", u, err.Error()) // Get error details - netErr := setError(err, fields, tags) - - // If recognize the returned error, get out - if netErr != nil { - return fields, tags, nil + if setError(err, fields, tags) == nil { + // Any error not recognized by `set_error` is considered a "connection_failed" + setResult("connection_failed", fields, tags) } - // Any error not recognized by `set_error` is considered a "connection_failed" - setResult("connection_failed", fields, tags) return fields, tags, nil } if _, ok := fields["response_time"]; !ok { - fields["response_time"] = response_time + fields["response_time"] = responseTime } // This function closes the response body, as @@ -328,12 +336,12 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] tags["status_code"] = strconv.Itoa(resp.StatusCode) fields["http_response_code"] = resp.StatusCode - if h.ResponseBodyMaxSize.Size == 0 { - h.ResponseBodyMaxSize.Size = defaultResponseBodyMaxSize + if h.ResponseBodyMaxSize == 0 { + h.ResponseBodyMaxSize = config.Size(defaultResponseBodyMaxSize) } - bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, h.ResponseBodyMaxSize.Size+1)) + bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) // Check first if the response body size exceeds the limit. - if err == nil && int64(len(bodyBytes)) > h.ResponseBodyMaxSize.Size { + if err == nil && int64(len(bodyBytes)) > int64(h.ResponseBodyMaxSize) { h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) return fields, tags, nil } else if err != nil { @@ -352,16 +360,31 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } fields["content_length"] = len(bodyBytes) - // Check the response for a regex match. 
+ var success = true + + // Check the response for a regex if h.ResponseStringMatch != "" { if h.compiledStringMatch.Match(bodyBytes) { - setResult("success", fields, tags) fields["response_string_match"] = 1 } else { + success = false setResult("response_string_mismatch", fields, tags) fields["response_string_match"] = 0 } - } else { + } + + // Check the response status code + if h.ResponseStatusCode > 0 { + if resp.StatusCode == h.ResponseStatusCode { + fields["response_status_code_match"] = 1 + } else { + success = false + setResult("response_status_code_mismatch", fields, tags) + fields["response_status_code_match"] = 0 + } + } + + if success { setResult("success", fields, tags) } @@ -369,8 +392,8 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } // Set result in case of a body read error -func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { - h.Log.Debugf(error_msg) +func (h *HTTPResponse) setBodyReadError(errorMsg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { + h.Log.Debugf(errorMsg) setResult("body_read_error", fields, tags) fields["content_length"] = len(bodyBytes) if h.ResponseStringMatch != "" { @@ -385,13 +408,13 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { var err error h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch) if err != nil { - return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err) + return fmt.Errorf("failed to compile regular expression %s : %s", h.ResponseStringMatch, err) } } // Set default values - if h.ResponseTimeout.Duration < time.Second { - h.ResponseTimeout.Duration = time.Second * 5 + if h.ResponseTimeout < config.Duration(time.Second) { + h.ResponseTimeout = config.Duration(time.Second * 5) } // Check send and expected string if h.Method == "" { @@ -408,7 +431,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { } if h.client == nil { - client, err := h.createHttpClient() + client, err := h.createHTTPClient() if err != nil { return err } @@ -423,7 +446,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { } if addr.Scheme != "http" && addr.Scheme != "https" { - acc.AddError(errors.New("Only http and https are supported")) + acc.AddError(errors.New("only http and https are supported")) continue } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 5a256e6e58d2a..5d109d0a35439 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -1,17 +1,24 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when https://github.com/influxdata/telegraf/issues/8451 is fixed + package http_response import ( "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" + "net/url" "testing" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -82,21 +89,26 @@ func checkTags(t *testing.T, tags map[string]interface{}, acc *testutil.Accumula func setUpTestMux() http.Handler { mux := http.NewServeMux() + // Ignore all returned errors below as the tests will fail anyway 
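The rewritten matching logic above only reports `success` when every enabled check passes, and a status-code mismatch overwrites an earlier string mismatch because its `setResult` call runs last. A compressed re-statement of that precedence; the `classify` helper is hypothetical and exists only to make the ordering explicit:

```go
package main

import "fmt"

// classify mirrors the order of the setResult calls in httpGather:
// the string check runs first, the status-code check runs second,
// and the later call wins when both checks fail.
func classify(gotCode, wantCode int, stringMatched, stringCheckEnabled bool) string {
	result := "success"
	if stringCheckEnabled && !stringMatched {
		result = "response_string_mismatch"
	}
	if wantCode > 0 && gotCode != wantCode {
		result = "response_status_code_mismatch"
	}
	return result
}

func main() {
	fmt.Println(classify(204, 200, false, true)) // response_status_code_mismatch
	fmt.Println(classify(200, 200, false, true)) // response_string_mismatch
	fmt.Println(classify(204, 0, true, false))   // success (both checks disabled)
}
```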
mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) { http.Redirect(w, req, "/good", http.StatusMovedPermanently) }) mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Server", "MyTestServer") w.Header().Set("Content-Type", "application/json; charset=utf-8") + //nolint:errcheck,revive fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/invalidUTF8", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive w.Write([]byte{0xff, 0xfe, 0xfd}) }) mux.HandleFunc("/noheader", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive fmt.Fprintf(w, "\"service_status\": \"up\", \"healthy\" : \"true\"") }) mux.HandleFunc("/badredirect", func(w http.ResponseWriter, req *http.Request) { @@ -107,10 +119,12 @@ func setUpTestMux() http.Handler { http.Error(w, "method wasn't post", http.StatusMethodNotAllowed) return } + //nolint:errcheck,revive fmt.Fprintf(w, "used post correctly!") }) mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) + //nolint:errcheck,revive req.Body.Close() if err != nil { http.Error(w, "couldn't read request body", http.StatusBadRequest) @@ -120,11 +134,14 @@ func setUpTestMux() http.Handler { http.Error(w, "body was empty", http.StatusBadRequest) return } + //nolint:errcheck,revive fmt.Fprintf(w, "sent a body!") }) mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) { time.Sleep(time.Second * 2) - return + }) + mux.HandleFunc("/nocontent", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNoContent) }) return mux } @@ -159,9 +176,9 @@ func TestHeaders(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL, + URLs: []string{ts.URL}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 2}, + ResponseTimeout: config.Duration(time.Second * 2), Headers: map[string]string{ "Content-Type": "application/json", "Host": "Hello", @@ -195,10 +212,10 @@ func TestFields(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -233,10 +250,10 @@ func TestResponseBodyField(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -268,10 +285,10 @@ func TestResponseBodyField(t *testing.T) { // Invalid UTF-8 String h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/invalidUTF8", + URLs: []string{ts.URL + "/invalidUTF8"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -302,14 +319,14 @@ func TestResponseBodyMaxSize(t *testing.T) { h := &HTTPResponse{ Log: 
testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, - ResponseBodyMaxSize: internal.Size{Size: 5}, + ResponseBodyMaxSize: config.Size(5), FollowRedirects: true, } @@ -336,10 +353,10 @@ func TestHTTPHeaderTags(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, Headers: map[string]string{ "Content-Type": "application/json", @@ -371,10 +388,10 @@ func TestHTTPHeaderTags(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/noheader", + URLs: []string{ts.URL + "/noheader"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, Headers: map[string]string{ "Content-Type": "application/json", @@ -397,10 +414,10 @@ func TestHTTPHeaderTags(t *testing.T) { // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, - Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, FollowRedirects: false, } @@ -453,10 +470,10 @@ func TestInterface(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -492,10 +509,10 @@ func TestRedirects(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/redirect", + URLs: []string{ts.URL + "/redirect"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -523,10 +540,10 @@ func TestRedirects(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/badredirect", + URLs: []string{ts.URL + "/badredirect"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -560,10 +577,10 @@ func TestMethod(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "POST", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": 
"application/json", }, @@ -591,10 +608,10 @@ func TestMethod(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -623,10 +640,10 @@ func TestMethod(t *testing.T) { //check that lowercase methods work correctly h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "head", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -660,10 +677,10 @@ func TestBody(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/musthaveabody", + URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -691,9 +708,9 @@ func TestBody(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/musthaveabody", + URLs: []string{ts.URL + "/musthaveabody"}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -725,11 +742,11 @@ func TestStringMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -763,11 +780,11 @@ func TestStringMatchJson(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/jsonresponse", + URLs: []string{ts.URL + "/jsonresponse"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "\"service_status\": \"up\"", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -801,11 +818,11 @@ func TestStringMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the bad page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -844,10 +861,10 @@ func TestTimeout(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/twosecondnap", + URLs: []string{ts.URL + "/twosecondnap"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second}, + ResponseTimeout: config.Duration(time.Second), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -878,11 +895,11 @@ func TestBadRegex(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + 
"/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "bad regex:[[", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -898,15 +915,25 @@ func TestBadRegex(t *testing.T) { checkOutput(t, &acc, nil, nil, absentFields, absentTags) } +type fakeClient struct { + statusCode int + err error +} + +func (f *fakeClient) Do(_ *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: f.statusCode}, f.err +} + func TestNetworkErrors(t *testing.T) { // DNS error h := &HTTPResponse{ Log: testutil.Logger{}, - Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here + URLs: []string{"https://nonexistent.nonexistent"}, // Any non-resolvable URL works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), FollowRedirects: false, + client: &fakeClient{err: &url.Error{Err: &net.OpError{Err: &net.DNSError{Err: "DNS error"}}}}, } var acc testutil.Accumulator @@ -929,10 +956,10 @@ func TestNetworkErrors(t *testing.T) { // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, - Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), FollowRedirects: false, } @@ -964,7 +991,7 @@ func TestContentLength(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -995,7 +1022,7 @@ func TestContentLength(t *testing.T) { URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -1029,7 +1056,8 @@ func TestRedirect(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Location", "http://example.org") w.WriteHeader(http.StatusMovedPermanently) - w.Write([]byte("test")) + _, err := w.Write([]byte("test")) + require.NoError(t, err) }) plugin := &HTTPResponse{ @@ -1079,10 +1107,10 @@ func TestBasicAuth(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Username: "me", Password: "mypassword", Headers: map[string]string{ @@ -1110,3 +1138,176 @@ func TestBasicAuth(t *testing.T) { absentFields := []string{"response_string_match"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) } + +func TestStatusCodeMatchFail(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/nocontent"}, + ResponseStatusCode: http.StatusOK, + ResponseTimeout: config.Duration(time.Second * 20), + } + + var acc 
testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusNoContent, + "response_status_code_match": 0, + "result_type": "response_status_code_mismatch", + "result_code": 6, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "204", + "result": "response_status_code_mismatch", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestStatusCodeMatch(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/nocontent"}, + ResponseStatusCode: http.StatusNoContent, + ResponseTimeout: config.Duration(time.Second * 20), + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusNoContent, + "response_status_code_match": 1, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "204", + "result": "success", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestStatusCodeAndStringMatch(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/good"}, + ResponseStatusCode: http.StatusOK, + ResponseStringMatch: "hit the good page", + ResponseTimeout: config.Duration(time.Second * 20), + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "response_status_code_match": 1, + "response_string_match": 1, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "200", + "result": "success", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestStatusCodeAndStringMatchFail(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/nocontent"}, + ResponseStatusCode: http.StatusOK, + ResponseStringMatch: "hit the good page", + ResponseTimeout: config.Duration(time.Second * 20), + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusNoContent, + "response_status_code_match": 0, + "response_string_match": 0, + "result_type": "response_status_code_mismatch", + "result_code": 6, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "204", + "result": "response_status_code_mismatch", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestSNI(t *testing.T) { + ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + 
"/good"}, + Method: "GET", + ResponseTimeout: config.Duration(time.Second * 20), + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + ServerName: "super-special-hostname.example.com", + }, + } + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 19fe014457734..3f7efb10a4098 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,8 +1,8 @@ # HTTP JSON Input Plugin -The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. +### DEPRECATED in Telegraf v1.6: Use [HTTP input plugin](../http) as replacement. -Deprecated (1.6): use the [http](../http) input. +The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. ### Configuration: diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index a5f5e47aad68e..10a4cb0c17643 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -3,7 +3,7 @@ package httpjson import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -21,13 +21,13 @@ var ( utf8BOM = []byte("\xef\xbb\xbf") ) -// HttpJson struct -type HttpJson struct { +// HTTPJSON struct +type HTTPJSON struct { Name string Servers []string Method string TagKeys []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration Parameters map[string]string Headers map[string]string tls.ClientConfig @@ -113,16 +113,16 @@ var sampleConfig = ` # apiVersion = "v1" ` -func (h *HttpJson) SampleConfig() string { +func (h *HTTPJSON) SampleConfig() string { return sampleConfig } -func (h *HttpJson) Description() string { +func (h *HTTPJSON) Description() string { return "Read flattened metrics from one or more JSON HTTP endpoints" } // Gathers data for all servers. 
-func (h *HttpJson) Gather(acc telegraf.Accumulator) error { +func (h *HTTPJSON) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup if h.client.HTTPClient() == nil { @@ -131,12 +131,12 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: h.ResponseTimeout.Duration, + ResponseHeaderTimeout: time.Duration(h.ResponseTimeout), TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: h.ResponseTimeout.Duration, + Timeout: time.Duration(h.ResponseTimeout), } h.client.SetHTTPClient(client) } @@ -162,7 +162,7 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error { // // Returns: // error: Any error that may have occurred -func (h *HttpJson) gatherServer( +func (h *HTTPJSON) gatherServer( acc telegraf.Accumulator, serverURL string, ) error { @@ -171,11 +171,11 @@ func (h *HttpJson) gatherServer( return err } - var msrmnt_name string + var msrmntName string if h.Name == "" { - msrmnt_name = "httpjson" + msrmntName = "httpjson" } else { - msrmnt_name = "httpjson_" + h.Name + msrmntName = "httpjson_" + h.Name } tags := map[string]string{ "server": serverURL, @@ -183,7 +183,7 @@ func (h *HttpJson) gatherServer( parser, err := parsers.NewParser(&parsers.Config{ DataFormat: "json", - MetricName: msrmnt_name, + MetricName: msrmntName, TagKeys: h.TagKeys, DefaultTags: tags, }) @@ -207,7 +207,7 @@ func (h *HttpJson) gatherServer( return nil } -// Sends an HTTP request to the server using the HttpJson object's HTTPClient. +// Sends an HTTP request to the server using the HTTPJSON object's HTTPClient. // This request can be either a GET or a POST. // Parameters: // serverURL: endpoint to send request to @@ -215,7 +215,7 @@ func (h *HttpJson) gatherServer( // Returns: // string: body of the response // error : Any error that may have occurred -func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { +func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { @@ -263,7 +263,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } @@ -285,11 +285,9 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { - return &HttpJson{ - client: &RealHTTPClient{}, - ResponseTimeout: internal.Duration{ - Duration: 5 * time.Second, - }, + return &HTTPJSON{ + client: &RealHTTPClient{}, + ResponseTimeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 90975919959e8..b203238a94037 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -2,7 +2,7 @@ package httpjson import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -143,7 +143,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -154,15 +154,15 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { return nil } -// Generates a pointer to an HttpJson object that uses 
a mock HTTP client. +// Generates a pointer to an HTTPJSON object that uses a mock HTTP client. // Parameters: // response : Body of the response that the mock HTTP client should return // statusCode: HTTP status code the mock HTTP client should return // // Returns: -// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client -func genMockHttpJson(response string, statusCode int) []*HttpJson { - return []*HttpJson{ +// *HTTPJSON: Pointer to an HTTPJSON object that uses the generated mock HTTP client +func genMockHTTPJSON(response string, statusCode int) []*HTTPJSON { + return []*HTTPJSON{ { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ @@ -206,7 +206,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson { // Test that the proper values are ignored or collected func TestHttpJson200(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 200) + httpjson := genMockHTTPJSON(validJSON, 200) for _, service := range httpjson { var acc testutil.Accumulator @@ -233,11 +233,12 @@ func TestHttpJsonGET_URL(t *testing.T) { key := r.FormValue("api_key") assert.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err := fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL + "?api_key=mykey"}, Name: "", Method: "GET", @@ -305,11 +306,12 @@ func TestHttpJsonGET(t *testing.T) { key := r.FormValue("api_key") assert.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err := fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL}, Name: "", Method: "GET", @@ -375,15 +377,16 @@ func TestHttpJsonPOST(t *testing.T) { "api_key": "mykey", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) assert.NoError(t, err) assert.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err = fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL}, Name: "", Method: "POST", @@ -445,7 +448,7 @@ func TestHttpJsonPOST(t *testing.T) { // Test response to HTTP 500 func TestHttpJson500(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 500) + httpjson := genMockHTTPJSON(validJSON, 500) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) @@ -456,7 +459,7 @@ func TestHttpJson500(t *testing.T) { // Test response to HTTP 405 func TestHttpJsonBadMethod(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 200) + httpjson := genMockHTTPJSON(validJSON, 200) httpjson[0].Method = "NOT_A_REAL_METHOD" var acc testutil.Accumulator @@ -468,7 +471,7 @@ func TestHttpJsonBadMethod(t *testing.T) { // Test response to malformed JSON func TestHttpJsonBadJson(t *testing.T) { - httpjson := genMockHttpJson(invalidJSON, 200) + httpjson := genMockHTTPJSON(invalidJSON, 200) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) @@ -479,7 +482,7 @@ func TestHttpJsonBadJson(t *testing.T) { // Test response to empty string as response object func TestHttpJsonEmptyResponse(t *testing.T) { - httpjson := genMockHttpJson(empty, 200) + httpjson := genMockHTTPJSON(empty, 200) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) @@ -488,7 +491,7 @@ func 
TestHttpJsonEmptyResponse(t *testing.T) { // Test that the proper values are ignored or collected func TestHttpJson200Tags(t *testing.T) { - httpjson := genMockHttpJson(validJSONTags, 200) + httpjson := genMockHTTPJSON(validJSONTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { @@ -526,7 +529,7 @@ const validJSONArrayTags = ` // Test that array data is collected correctly func TestHttpJsonArray200Tags(t *testing.T) { - httpjson := genMockHttpJson(validJSONArrayTags, 200) + httpjson := genMockHTTPJSON(validJSONArrayTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { @@ -563,7 +566,7 @@ var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]") // TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed func TestHttpJsonBOM(t *testing.T) { - httpjson := genMockHttpJson(string(jsonBOM), 200) + httpjson := genMockHTTPJSON(string(jsonBOM), 200) for _, service := range httpjson { if service.Name == "other_webapp" { diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 5ec0bb43db319..f56192a7a9282 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -18,7 +18,7 @@ type Icinga2 struct { ObjectType string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig Log telegraf.Logger @@ -53,7 +53,7 @@ type ObjectType string var sampleConfig = ` ## Required Icinga2 server address # server = "https://localhost:5665" - + ## Required Icinga2 object type ("services" or "hosts") # object_type = "services" @@ -82,7 +82,7 @@ func (i *Icinga2) SampleConfig() string { func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { for _, check := range checks { - url, err := url.Parse(i.Server) + serverURL, err := url.Parse(i.Server) if err != nil { i.Log.Error(err.Error()) continue @@ -106,16 +106,16 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { "check_command": check.Attrs.CheckCommand, "source": source, "state": levels[state], - "server": url.Hostname(), - "scheme": url.Scheme, - "port": url.Port(), + "server": serverURL.Hostname(), + "scheme": serverURL.Scheme, + "port": serverURL.Port(), } acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags) } } -func (i *Icinga2) createHttpClient() (*http.Client, error) { +func (i *Icinga2) createHTTPClient() (*http.Client, error) { tlsCfg, err := i.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -125,36 +125,36 @@ func (i *Icinga2) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: i.ResponseTimeout.Duration, + Timeout: time.Duration(i.ResponseTimeout), } return client, nil } func (i *Icinga2) Gather(acc telegraf.Accumulator) error { - if i.ResponseTimeout.Duration < time.Second { - i.ResponseTimeout.Duration = time.Second * 5 + if i.ResponseTimeout < config.Duration(time.Second) { + i.ResponseTimeout = config.Duration(time.Second * 5) } if i.client == nil { - client, err := i.createHttpClient() + client, err := i.createHTTPClient() if err != nil { return err } i.client = client } - requestUrl := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command" + requestURL := 
"%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command" // Note: attrs=host_name is only valid for 'services' requests, using check.Attrs.HostName for the host // 'hosts' requests will need to use attrs=name only, using check.Attrs.Name for the host if i.ObjectType == "services" { - requestUrl += "&attrs=host_name" + requestURL += "&attrs=host_name" } - url := fmt.Sprintf(requestUrl, i.Server, i.ObjectType) + address := fmt.Sprintf(requestURL, i.Server, i.ObjectType) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -171,7 +171,7 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() result := Result{} - json.NewDecoder(resp.Body).Decode(&result) + err = json.NewDecoder(resp.Body).Decode(&result) if err != nil { return err } @@ -186,7 +186,7 @@ func init() { return &Icinga2{ Server: "https://localhost:5665", ObjectType: "services", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go index 13055ed8c2d16..2a965877aeada 100644 --- a/plugins/inputs/icinga2/icinga2_test.go +++ b/plugins/inputs/icinga2/icinga2_test.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGatherServicesStatus(t *testing.T) { @@ -30,7 +31,7 @@ func TestGatherServicesStatus(t *testing.T) { ` checks := Result{} - json.Unmarshal([]byte(s), &checks) + require.NoError(t, json.Unmarshal([]byte(s), &checks)) icinga2 := new(Icinga2) icinga2.Log = testutil.Logger{} @@ -84,7 +85,7 @@ func TestGatherHostsStatus(t *testing.T) { ` checks := Result{} - json.Unmarshal([]byte(s), &checks) + require.NoError(t, json.Unmarshal([]byte(s), &checks)) var acc testutil.Accumulator diff --git a/plugins/inputs/infiniband/infiniband.go b/plugins/inputs/infiniband/infiniband.go index 65e1d6c712998..8a99bb0e469b6 100644 --- a/plugins/inputs/infiniband/infiniband.go +++ b/plugins/inputs/infiniband/infiniband.go @@ -13,10 +13,10 @@ type Infiniband struct { // Sample configuration for plugin var InfinibandConfig = `` -func (_ *Infiniband) SampleConfig() string { +func (i *Infiniband) SampleConfig() string { return InfinibandConfig } -func (_ *Infiniband) Description() string { +func (i *Infiniband) Description() string { return "Gets counters from all InfiniBand cards and ports installed" } diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 48cd8a428900d..2868c683e7ebb 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband @@ -11,8 +12,7 @@ import ( ) // Gather statistics from our infiniband cards -func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { - +func (i *Infiniband) Gather(acc telegraf.Accumulator) error { rdmaDevices := rdmamap.GetRdmaDeviceList() if len(rdmaDevices) == 0 { @@ -41,7 +41,6 @@ func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { // Add the statistics to the accumulator func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) { - // Allow users to filter by card and port tags := map[string]string{"device": dev, "port": port} fields := make(map[string]interface{}) diff --git 
a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go index 5b19672d975d8..8ad6731c17bd7 100644 --- a/plugins/inputs/infiniband/infiniband_notlinux.go +++ b/plugins/inputs/infiniband/infiniband_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package infiniband diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index 6c4bb24587f4a..c382a1fdf9dd0 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband @@ -38,7 +39,7 @@ func TestInfiniband(t *testing.T) { "port": "1", } - sample_rdmastats_entries := []rdmamap.RdmaStatEntry{ + sampleRdmastatsEntries := []rdmamap.RdmaStatEntry{ { Name: "excessive_buffer_overrun_errors", Value: uint64(0), @@ -127,8 +128,7 @@ func TestInfiniband(t *testing.T) { var acc testutil.Accumulator - addStats("m1x5_0", "1", sample_rdmastats_entries, &acc) + addStats("m1x5_0", "1", sampleRdmastatsEntries, &acc) acc.AssertContainsTaggedFields(t, "infiniband", fields, tags) - } diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index e17bd7072438b..8ba686aab1bd1 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -1,6 +1,8 @@ # InfluxDB Input Plugin -The InfluxDB plugin will collect metrics on the given InfluxDB servers. +The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our +[documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) +for detailed information about `influxdb` metrics. This plugin can also gather metrics from endpoints that expose InfluxDB-formatted endpoints. See below for more information. @@ -39,43 +41,222 @@ InfluxDB-formatted endpoints. See below for more information. **Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions: -- influxdb - - n_shards: The total number of shards in the specified database. -- influxdb_database: The database metrics are being collected from. -- influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`. -- influxdb_measurement: The measurement that metrics are collected from. -- influxdb_memstats: Statistics about the memory allocator in the specified database. - - heap_inuse: The number of bytes in in-use spans. - - heap_released: The number of bytes of physical memory returned to the OS. - - mspan_inuse: The number of bytes in in-use mspans. - - total_alloc: The cumulative bytes allocated for heap objects. - - sys: The total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. - - mallocs: The total number of heap objects allocated. (The total number of live objects are frees.) - - frees: The cumulative number of freed (live) heap objects. - - heap_idle: The number of bytes of idle heap objects. - - pause_total_ns: The total time garbage collection cycles are paused in nanoseconds. - - lookups: The number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. - - heap_sys: The number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. 
- - mcache_sys: The bytes of memory obtained from the OS for mcache structures.
- - next_gc: The target heap size of the next garbage collection cycle.
- - gc_cpu_fraction: The fraction of CPU time used by the garbage collection cycle.
- - other_sys: The number of bytes of memory used other than heap_sys, stacks_sys, mspan_sys, mcache_sys, buckhash_sys, and gc_sys.
- - alloc: The currently allocated number of bytes of heap objects.
- - stack_inuse: The number of bytes in in-use stacks.
- - stack_sys: The total number of bytes of memory obtained from the stack in use.
- - buck_hash_sys: The bytes of memory in profiling bucket hash tables.
- - gc_sys: The bytes of memory in garbage collection metadata.
- - num_gc: The number of completed garbage collection cycles.
- - heap_alloc: The size, in bytes, of all heap objects.
- - heap_objects: The number of allocated heap objects.
- - mspan_sys: The bytes of memory obtained from the OS for mspan.
- - mcache_inuse: The bytes of allocated mcache structures.
- - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch).
-- influxdb_shard: The shard metrics are collected from.
-- influxdb_subscriber: The InfluxDB subscription that metrics are collected from.
-- influxdb_tsm1_cache: The TSM cache that metrics are collected from.
-- influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from.
-- influxdb_write: The total writes to the specified database.
+- **influxdb_ae** _(Enterprise Only)_: Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters.
+ - **bytesRx**: Number of bytes received by the data node.
+ - **errors**: Total number of anti-entropy jobs that have resulted in errors.
+ - **jobs**: Total number of jobs executed by the data node.
+ - **jobsActive**: Number of active (currently executing) jobs.
+- **influxdb_cluster** _(Enterprise Only)_: Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters.
+ - **copyShardReq**: Number of internal requests made to copy a shard from one data node to another.
+ - **createIteratorReq**: Number of read requests from other data nodes in the cluster.
+ - **expandSourcesReq**: Number of remote node requests made to find measurements on this node that match a particular regular expression.
+ - **fieldDimensionsReq**: Number of remote node requests for information about the fields and associated types, and tag keys of measurements on this data node.
+ - **iteratorCostReq**: Number of internal requests for iterator cost.
+ - **removeShardReq**: Number of internal requests to delete a shard from this data node. Exclusively incremented by use of the `influxd-ctl remove shard` command.
+ - **writeShardFail**: Total number of internal write requests from a remote node that failed.
+ - **writeShardPointsReq**: Number of points in every internal write request from any remote node, regardless of success.
+ - **writeShardReq**: Number of internal write requests from a remote data node, regardless of success.
+- **influxdb_cq**: Metrics related to continuous queries (CQs).
+ - **queryFail**: Total number of continuous queries that executed but failed.
+ - **queryOk**: Total number of continuous queries that executed successfully.
+- **influxdb_database**: The database that metrics are collected from.
+ - **numMeasurements**: Current number of measurements in the specified database.
+ - **numSeries**: Current series cardinality of the specified database.
+- **influxdb_hh** _(Enterprise Only)_: Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters.
+ - **writeShardReq**: Number of initial write requests handled by the hinted handoff engine for a remote node.
+ - **writeShardReqPoints**: Number of write requests for each point in the initial request to the hinted handoff engine for a remote node.
+- **influxdb_hh_database** _(Enterprise Only)_: Aggregates all hinted handoff queues for a single database and node.
+ - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node.
+ - **bytesWritten**: Total number of bytes written to the hinted handoff queue.
+ - **queueBytes**: Total number of bytes remaining in the hinted handoff queue.
+ - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files.
+ - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit.
+ - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted.
+ - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node.
+ - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node.
+ - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node.
+ - **writeShardReq**: Total number of write batch requests enqueued into the hinted handoff queue.
+ - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue.
+- **influxdb_hh_processor** _(Enterprise Only)_: Statistics stored for a single queue (shard).
+ - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node.
+ - **bytesWritten**: Total number of bytes written to the hinted handoff queue.
+ - **queueBytes**: Total number of bytes remaining in the hinted handoff queue.
+ - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files.
+ - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit.
+ - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted.
+ - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node.
+ - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node.
+ - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node.
+ - **writeShardReq**: Total number of write batch requests enqueued into the hinted handoff queue.
+ - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue.
+- **influxdb_httpd**: Metrics related to the InfluxDB HTTP server.
+ - **authFail**: Number of HTTP requests that were aborted due to authentication being required, but not supplied or incorrect.
+ - **clientError**: Number of HTTP responses due to client errors, with a 4XX HTTP status code.
+ - **fluxQueryReq**: Number of Flux query requests served.
+ - **fluxQueryReqDurationNs**: Duration (wall-time), in nanoseconds, spent executing Flux query requests.
+ - **pingReq**: Number of times the InfluxDB HTTP server served the /ping HTTP endpoint.
+ - **pointsWrittenDropped**: Number of points dropped by the storage engine.
+ - **pointsWrittenFail**: Number of points accepted by the HTTP /write endpoint, but unable to be persisted.
+ - **pointsWrittenOK**: Number of points successfully accepted and persisted by the HTTP /write endpoint.
+ - **promReadReq**: Number of read requests to the Prometheus /read endpoint.
+ - **promWriteReq**: Number of write requests to the Prometheus /write endpoint.
+ - **queryReq**: Number of query requests.
+ - **queryReqDurationNs**: Total query request duration, in nanoseconds (ns).
+ - **queryRespBytes**: Total number of bytes returned in query responses.
+ - **recoveredPanics**: Total number of panics recovered by the HTTP handler.
+ - **req**: Total number of HTTP requests served.
+ - **reqActive**: Number of currently active requests.
+ - **reqDurationNs**: Duration (wall time), in nanoseconds, spent inside HTTP requests.
+ - **serverError**: Number of HTTP responses due to server errors.
+ - **statusReq**: Number of status requests served using the HTTP /status endpoint.
+ - **valuesWrittenOK**: Number of values (fields) successfully accepted and persisted by the HTTP /write endpoint.
+ - **writeReq**: Number of write requests served using the HTTP /write endpoint.
+ - **writeReqActive**: Number of currently active write requests.
+ - **writeReqBytes**: Total number of bytes of line protocol data received by write requests, using the HTTP /write endpoint.
+ - **writeReqDurationNs**: Duration, in nanoseconds, of write requests served using the /write HTTP endpoint.
+- **influxdb_memstats**: Statistics about the memory allocator in the specified database.
+ - **Alloc**: Number of bytes allocated to heap objects.
+ - **BuckHashSys**: Number of bytes of memory in profiling bucket hash tables.
+ - **Frees**: Cumulative count of heap objects freed.
+ - **GCCPUFraction**: Fraction of InfluxDB's available CPU time used by the garbage collector (GC) since InfluxDB started.
+ - **GCSys**: Number of bytes of memory in garbage collection metadata.
+ - **HeapAlloc**: Number of bytes of allocated heap objects.
+ - **HeapIdle**: Number of bytes in idle (unused) spans.
+ - **HeapInuse**: Number of bytes in in-use spans.
+ - **HeapObjects**: Number of allocated heap objects.
+ - **HeapReleased**: Number of bytes of physical memory returned to the OS.
+ - **HeapSys**: Number of bytes of heap memory obtained from the OS.
+ - **LastGC**: Time the last garbage collection finished.
+ - **Lookups**: Number of pointer lookups performed by the runtime.
+ - **MCacheInuse**: Number of bytes of allocated mcache structures.
+ - **MCacheSys**: Number of bytes of memory obtained from the OS for mcache structures.
+ - **MSpanInuse**: Number of bytes of allocated mspan structures.
+ - **MSpanSys**: Number of bytes of memory obtained from the OS for mspan structures.
+ - **Mallocs**: Cumulative count of heap objects allocated.
+ - **NextGC**: Target heap size of the next GC cycle.
+ - **NumForcedGC**: Number of GC cycles that were forced by the application calling the GC function.
+ - **NumGC**: Number of completed GC cycles.
+ - **OtherSys**: Number of bytes of memory in miscellaneous off-heap runtime allocations.
+ - **PauseTotalNs**: Cumulative nanoseconds in GC stop-the-world pauses since the program started.
+ - **StackInuse**: Number of bytes in stack spans.
+ - **StackSys**: Number of bytes of stack memory obtained from the OS.
+ - **Sys**: Total bytes of memory obtained from the OS.
+ - **TotalAlloc**: Cumulative bytes allocated for heap objects.
+- **influxdb_queryExecutor**: Metrics related to usage of the Query Executor of the InfluxDB engine.
+ - **queriesActive**: Number of active queries currently being handled.
+ - **queriesExecuted**: Number of queries executed (started).
+ - **queriesFinished**: Number of queries that have finished executing.
+ - **queryDurationNs**: Total duration, in nanoseconds, of executed queries.
+ - **recoveredPanics**: Number of panics recovered by the Query Executor.
+- **influxdb_rpc** _(Enterprise Only)_: Statistics related to the use of RPC calls within InfluxDB Enterprise clusters.
+ - **idleStreams**: Number of idle multiplexed streams across all live TCP connections.
+ - **liveConnections**: Current number of live TCP connections to other nodes.
+ - **liveStreams**: Current number of live multiplexed streams across all live TCP connections.
+ - **rpcCalls**: Total number of RPC calls made to remote nodes.
+ - **rpcFailures**: Total number of RPC failures, which are RPCs that did not recover.
+ - **rpcReadBytes**: Total number of RPC bytes read.
+ - **rpcRetries**: Total number of RPC calls that retried at least once.
+ - **rpcWriteBytes**: Total number of RPC bytes written.
+ - **singleUse**: Total number of single-use connections opened using Dial.
+ - **singleUseOpen**: Number of single-use connections currently open.
+ - **totalConnections**: Total number of TCP connections that have been established.
+ - **totalStreams**: Total number of streams established.
+- **influxdb_runtime**: Subset of memstat record statistics for the Go memory allocator.
+ - **Alloc**: Currently allocated number of bytes of heap objects.
+ - **Frees**: Cumulative number of freed heap objects.
+ - **HeapAlloc**: Size, in bytes, of all heap objects.
+ - **HeapIdle**: Number of bytes of idle heap objects.
+ - **HeapInUse**: Number of bytes in in-use spans.
+ - **HeapObjects**: Number of allocated heap objects.
+ - **HeapReleased**: Number of bytes of physical memory returned to the OS.
+ - **HeapSys**: Number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap.
+ - **Lookups**: Number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals.
+ - **Mallocs**: Total number of heap objects allocated. The number of live objects is Mallocs - Frees.
+ - **NumGC**: Number of completed GC (garbage collection) cycles.
+ - **NumGoroutine**: Total number of goroutines.
+ - **PauseTotalNs**: Total duration, in nanoseconds, of GC (garbage collection) pauses.
+ - **Sys**: Total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures.
+ - **TotalAlloc**: Total number of bytes allocated for heap objects. This statistic does not decrease when objects are freed.
+- **influxdb_shard**: Metrics related to InfluxDB shards.
+ - **diskBytes**: Size, in bytes, of the shard, including the size of the data directory and the WAL directory.
+ - **fieldsCreate**: Number of fields created.
+ - **indexType**: Type of index: `inmem` or `tsi1`.
+ - **n_shards**: Total number of shards in the specified database.
+ - **seriesCreate**: Number of series created.
+ - **writeBytes**: Number of bytes written to the shard.
+ - **writePointsDropped**: Number of requests to write points dropped from a write.
+ - **writePointsErr**: Number of requests to write points that failed to be written due to errors.
+ - **writePointsOk**: Number of points written successfully.
+ - **writeReq**: Total number of write requests.
+ - **writeReqErr**: Total number of write requests that failed due to errors.
+ - **writeReqOk**: Total number of successful write requests.
+- **influxdb_subscriber**: InfluxDB subscription metrics.
+ - **createFailures**: Number of subscriptions that failed to be created.
+ - **pointsWritten**: Total number of points that were successfully written to subscribers.
+ - **writeFailures**: Total number of batches that failed to be written to subscribers.
+- **influxdb_tsm1_cache**: TSM cache metrics.
+ - **cacheAgeMs**: Duration, in milliseconds, since the cache was last snapshotted at sample time.
+ - **cachedBytes**: Total number of bytes that have been written into snapshots.
+ - **diskBytes**: Size, in bytes, of on-disk snapshots.
+ - **memBytes**: Size, in bytes, of in-memory cache.
+ - **snapshotCount**: Current level (number) of active snapshots.
+ - **WALCompactionTimeMs**: Duration, in milliseconds, that the commit lock is held while compacting snapshots.
+ - **writeDropped**: Total number of writes dropped due to timeouts.
+ - **writeErr**: Total number of writes that failed.
+ - **writeOk**: Total number of successful writes.
+- **influxdb_tsm1_engine**: TSM storage engine metrics.
+ - **cacheCompactionDuration**: Duration (wall time), in nanoseconds, spent in cache compactions.
+ - **cacheCompactionErr**: Number of cache compactions that have failed due to errors.
+ - **cacheCompactions**: Total number of cache compactions that have ever run.
+ - **cacheCompactionsActive**: Number of cache compactions that are currently running.
+ - **tsmFullCompactionDuration**: Duration (wall time), in nanoseconds, spent in full compactions.
+ - **tsmFullCompactionErr**: Total number of TSM full compactions that have failed due to errors.
+ - **tsmFullCompactionQueue**: Current number of pending TSM full compactions.
+ - **tsmFullCompactions**: Total number of TSM full compactions that have ever run.
+ - **tsmFullCompactionsActive**: Number of TSM full compactions currently running.
+ - **tsmLevel1CompactionDuration**: Duration (wall time), in nanoseconds, spent in TSM level 1 compactions.
+ - **tsmLevel1CompactionErr**: Total number of TSM level 1 compactions that have failed due to errors.
+ - **tsmLevel1CompactionQueue**: Current number of pending TSM level 1 compactions.
+ - **tsmLevel1Compactions**: Total number of TSM level 1 compactions that have ever run.
+ - **tsmLevel1CompactionsActive**: Number of TSM level 1 compactions that are currently running.
+ - **tsmLevel2CompactionDuration**: Duration (wall time), in nanoseconds, spent in TSM level 2 compactions.
+ - **tsmLevel2CompactionErr**: Number of TSM level 2 compactions that have failed due to errors.
+ - **tsmLevel2CompactionQueue**: Current number of pending TSM level 2 compactions.
+ - **tsmLevel2Compactions**: Total number of TSM level 2 compactions that have ever run.
+ - **tsmLevel2CompactionsActive**: Number of TSM level 2 compactions that are currently running.
+ - **tsmLevel3CompactionDuration**: Duration (wall time), in nanoseconds, spent in TSM level 3 compactions.
+ - **tsmLevel3CompactionErr**: Number of TSM level 3 compactions that have failed due to errors.
+ - **tsmLevel3CompactionQueue**: Current number of pending TSM level 3 compactions.
+ - **tsmLevel3Compactions**: Total number of TSM level 3 compactions that have ever run.
+ - **tsmLevel3CompactionsActive**: Number of TSM level 3 compactions that are currently running.
+ - **tsmOptimizeCompactionDuration**: Duration (wall time), in nanoseconds, spent during TSM optimize compactions.
+ - **tsmOptimizeCompactionErr**: Total number of TSM optimize compactions that have failed due to errors.
+ - **tsmOptimizeCompactionQueue**: Current number of pending TSM optimize compactions.
+ - **tsmOptimizeCompactions**: Total number of TSM optimize compactions that have ever run.
+ - **tsmOptimizeCompactionsActive**: Number of TSM optimize compactions that are currently running.
+- **influxdb_tsm1_filestore**: The TSM file store metrics.
+ - **diskBytes**: Size, in bytes, of disk usage by the TSM file store.
+ - **numFiles**: Total number of files in the TSM file store.
+- **influxdb_tsm1_wal**: The TSM Write Ahead Log (WAL) metrics.
+ - **currentSegmentDiskBytes**: Current size, in bytes, of the segment disk.
+ - **oldSegmentDiskBytes**: Size, in bytes, of the old segment disk.
+ - **writeErr**: Number of writes that failed due to errors.
+ - **writeOK**: Number of writes that succeeded.
+- **influxdb_write**: Metrics related to InfluxDB writes.
+ - **pointReq**: Total number of points requested to be written.
+ - **pointReqHH** _(Enterprise only)_: Total number of points received for write by this node and then enqueued into hinted handoff for the destination node.
+ - **pointReqLocal** _(Enterprise only)_: Total number of point requests that have been attempted to be written into a shard on the same (local) node.
+ - **pointReqRemote** _(Enterprise only)_: Total number of points received for write by this node that needed to be forwarded into a shard on a remote node.
+ - **pointsWrittenOK**: Number of points written to the HTTP /write endpoint and persisted successfully.
+ - **req**: Total number of batches requested to be written.
+ - **subWriteDrop**: Total number of batches that failed to be sent to the subscription dispatcher.
+ - **subWriteOk**: Total number of batches successfully sent to the subscription dispatcher.
+ - **valuesWrittenOK**: Number of values (fields) written to the HTTP /write endpoint and persisted successfully.
+ - **writeDrop**: Total number of write requests for points that have been dropped due to timestamps not matching any existing retention policies.
+ - **writeError**: Total number of batches of points that were not successfully written, due to a failure to write to a local or remote shard.
+ - **writeOk**: Total number of batches of points written at the requested consistency level.
+ - **writePartial** _(Enterprise only)_: Total number of batches written to at least one node that did not meet the requested consistency level.
+ - **writeTimeout**: Total number of write requests that failed to complete within the default write timeout duration.
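All of the fields above come from the expvar-style statistics InfluxDB publishes about itself. As a quick way to see the raw JSON behind this list, here is a minimal Go sketch (illustrative only, not part of the plugin; it assumes a default local InfluxDB 1.x instance exposing `/debug/vars` on port 8086 and decodes just a few memstats keys):

```go
package main

// Minimal sketch: fetch the stats endpoint the influxdb input plugin reads
// and print a few of the memstats fields documented above. The URL is an
// assumption for a default local InfluxDB 1.x install; adjust as needed.
import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:8086/debug/vars")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the memstats block; the full payload is one large JSON
	// object whose keys correspond to the measurements listed above.
	var payload struct {
		Memstats struct {
			Alloc     uint64 `json:"Alloc"`
			HeapInuse uint64 `json:"HeapInuse"`
			NumGC     uint32 `json:"NumGC"`
		} `json:"memstats"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		panic(err)
	}
	fmt.Printf("Alloc=%d HeapInuse=%d NumGC=%d\n",
		payload.Memstats.Alloc, payload.Memstats.HeapInuse, payload.Memstats.NumGC)
}
```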
### Example Output: diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index d7eb66153034a..b8c028f05aff8 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -33,10 +34,10 @@ func (e *APIError) Error() string { } type InfluxDB struct { - URLs []string `toml:"urls"` - Username string `toml:"username"` - Password string `toml:"password"` - Timeout internal.Duration `toml:"timeout"` + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -86,10 +87,10 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { } i.client = &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: i.Timeout.Duration, + ResponseHeaderTimeout: time.Duration(i.Timeout), TLSClientConfig: tlsCfg, }, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), } } @@ -318,7 +319,7 @@ func readResponseError(resp *http.Response) error { func init() { inputs.Add("influxdb", func() telegraf.Input { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 27ea81b6d7dd6..93a02a19e56a7 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -14,7 +14,8 @@ import ( func TestBasic(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(basicJSON)) + _, err := w.Write([]byte(basicJSON)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -61,7 +62,8 @@ func TestBasic(t *testing.T) { func TestInfluxDB(t *testing.T) { fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(influxReturn)) + _, err := w.Write([]byte(influxReturn)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -121,7 +123,8 @@ func TestInfluxDB(t *testing.T) { func TestInfluxDB2(t *testing.T) { fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(influxReturn2)) + _, err := w.Write([]byte(influxReturn2)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -146,7 +149,8 @@ func TestInfluxDB2(t *testing.T) { func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte("not json")) + _, err := w.Write([]byte("not json")) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -164,7 +168,8 @@ func TestErrorHandling(t *testing.T) { func TestErrorHandling404(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(basicJSON)) + _, err := w.Write([]byte(basicJSON)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -182,7 
+187,8 @@ func TestErrorHandling404(t *testing.T) { func TestErrorResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + _, err := w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index aae77fb965f7a..0912c36087b75 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -75,5 +75,5 @@ Metrics are created from InfluxDB Line Protocol in the request body. curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/guides/writing_data/ +[influxdb_http_api]: https://docs.influxdata.com/influxdb/v1.8/guides/write_data/ [http_listener_v2]: /plugins/inputs/http_listener_v2/README.md diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 07d27ebbd934d..6b5c67ea07999 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -29,14 +30,14 @@ type InfluxDBListener struct { port int tlsint.ServerConfig - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` - MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - DatabaseTag string `toml:"database_tag"` - RetentionPolicyTag string `toml:"retention_policy_tag"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize config.Size `toml:"max_body_size"` + MaxLineSize config.Size `toml:"max_line_size"` // deprecated in 1.14; ignored + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + DatabaseTag string `toml:"database_tag"` + RetentionPolicyTag string `toml:"retention_policy_tag"` timeFunc influx.TimeFunc @@ -137,19 +138,19 @@ func (h *InfluxDBListener) Init() error { h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags) h.routes() - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } - if h.MaxLineSize.Size != 0 { + if h.MaxLineSize != 0 { h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored") } - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 + if h.ReadTimeout < config.Duration(time.Second) { + h.ReadTimeout = config.Duration(time.Second * 10) } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 + if h.WriteTimeout < config.Duration(time.Second) { + h.WriteTimeout = config.Duration(time.Second * 10) } return nil @@ 
-167,8 +168,8 @@ func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error { h.server = http.Server{ Addr: h.ServiceAddress, Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: tlsConf, } @@ -221,7 +222,10 @@ func (h *InfluxDBListener) handleQuery() http.HandlerFunc { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.WriteHeader(http.StatusOK) - res.Write([]byte("{\"results\":[]}")) + _, err := res.Write([]byte("{\"results\":[]}")) + if err != nil { + h.Log.Debugf("error writing result in handleQuery: %v", err) + } } } @@ -236,7 +240,9 @@ func (h *InfluxDBListener) handlePing() http.HandlerFunc { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusOK) b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above - res.Write(b) + if _, err := res.Write(b); err != nil { + h.Log.Debugf("error writing result in handlePing: %v", err) + } } else { res.WriteHeader(http.StatusNoContent) } @@ -254,8 +260,10 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { return func(res http.ResponseWriter, req *http.Request) { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) + if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } @@ -263,14 +271,16 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { rp := req.URL.Query().Get("rp") body := req.Body - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) // Handle gzip request bodies if req.Header.Get("Content-Encoding") == "gzip" { var err error body, err = gzip.NewReader(body) if err != nil { h.Log.Debugf("Error decompressing request body: %v", err.Error()) - badRequest(res, err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } defer body.Close() @@ -288,7 +298,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { var m telegraf.Metric var err error var parseErrorCount int - var lastPos int = 0 + var lastPos int var firstParseErrorStr string for { select { @@ -306,7 +316,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { // Continue parsing metrics even if some are malformed if parseErr, ok := err.(*influx.ParseError); ok { - parseErrorCount += 1 + parseErrorCount++ errStr := parseErr.Error() if firstParseErrorStr == "" { firstParseErrorStr = errStr @@ -327,24 +337,27 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { } h.acc.AddMetric(m) - } if err != influx.EOF { h.Log.Debugf("Error parsing the request body: %v", err.Error()) - badRequest(res, err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } if parseErrorCount > 0 { var partialErrorString string switch parseErrorCount { case 1: - partialErrorString = fmt.Sprintf("%s", firstParseErrorStr) + partialErrorString = firstParseErrorStr case 2: partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) default: partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) } - partialWrite(res, partialErrorString) + if err := 
partialWrite(res, partialErrorString); err != nil { + h.Log.Debugf("error in partial-write: %v", err) + } return } @@ -353,15 +366,16 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { } } -func tooLarge(res http.ResponseWriter) { +func tooLarge(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.Header().Set("X-Influxdb-Error", "http: request body too large") res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err } -func badRequest(res http.ResponseWriter, errString string) { +func badRequest(res http.ResponseWriter, errString string) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") if errString == "" { @@ -369,15 +383,17 @@ func badRequest(res http.ResponseWriter, errString string) { } res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + _, err := res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + return err } -func partialWrite(res http.ResponseWriter, errString string) { +func partialWrite(res http.ResponseWriter, errString string) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + _, err := res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + return err } func getPrecisionMultiplier(precision string) time.Duration { diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go index d3dc552192007..f0bfc695c98a3 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" ) @@ -20,9 +20,7 @@ func newListener() *InfluxDBListener { acc: &testutil.NopAccumulator{}, bytesRecv: selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}), writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}), - MaxBodySize: internal.Size{ - Size: defaultMaxBodySize, - }, + MaxBodySize: config.Size(defaultMaxBodySize), } return listener } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 5c934e371bfc7..36952f6851064 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -4,19 +4,20 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -117,7 +118,7 @@ func TestWriteSecureNoClientAuth(t *testing.T) { // post single message to listener resp, err := 
noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -132,7 +133,7 @@ func TestWriteSecureWithClientAuth(t *testing.T) { // post single message to listener resp, err := getSecureClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -151,7 +152,7 @@ func TestWriteBasicAuth(t *testing.T) { req.SetBasicAuth(basicUsername, basicPassword) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -169,7 +170,7 @@ func TestWriteKeepDatabase(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -181,7 +182,7 @@ func TestWriteKeepDatabase(t *testing.T) { // post single message to listener with a database tag in it already. It should be clobbered. resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -193,7 +194,7 @@ func TestWriteKeepDatabase(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -218,7 +219,7 @@ func TestWriteRetentionPolicyTag(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", "rp=myrp"), "", bytes.NewBuffer([]byte("cpu time_idle=42"))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.Equal(t, 204, resp.StatusCode) expected := []telegraf.Metric{ @@ -250,7 +251,7 @@ func TestWriteNoNewline(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -271,7 +272,7 @@ func TestPartialWrite(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) acc.Wait(1) @@ -300,7 +301,7 @@ func TestWriteMaxLineSizeIncrease(t *testing.T) { // Post a gigantic metric to the listener and verify that it writes OK this time: resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -308,7 +309,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { listener := &InfluxDBListener{ Log: 
testutil.Logger{}, ServiceAddress: "localhost:0", - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), timeFunc: time.Now, } @@ -319,7 +320,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@ -339,7 +340,7 @@ func TestWriteLargeLine(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) //todo: with the new parser, long lines aren't a problem. Do we need to skip them? //require.EqualValues(t, 400, resp.StatusCode) @@ -406,7 +407,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) @@ -416,6 +417,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -449,15 +451,21 @@ func TestWriteHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) @@ -474,7 +482,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 404, resp.StatusCode) } @@ -489,7 +497,7 @@ func TestWriteInvalid(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -504,7 +512,7 @@ func TestWriteEmpty(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -520,6 +528,7 @@ func TestQuery(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } @@ -535,7 +544,7 @@ func TestPing(t *testing.T) { require.NoError(t, err) require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) require.Len(t, resp.Header["Content-Type"], 
0) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -551,7 +560,7 @@ func TestPingVerbose(t *testing.T) { require.NoError(t, err) require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } @@ -567,7 +576,7 @@ func TestWriteWithPrecision(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -592,7 +601,7 @@ func TestWriteWithPrecisionNoTimestamp(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -638,7 +647,7 @@ func TestWriteParseErrors(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0]) }) diff --git a/plugins/inputs/influxdb_v2_listener/README.md b/plugins/inputs/influxdb_v2_listener/README.md index 4258e021d85fd..71fa6c19bee3a 100644 --- a/plugins/inputs/influxdb_v2_listener/README.md +++ b/plugins/inputs/influxdb_v2_listener/README.md @@ -53,4 +53,4 @@ Metrics are created from InfluxDB Line Protocol in the request body. 
curl -i -XPOST 'http://localhost:8186/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -[influxdb_http_api]: https://v2.docs.influxdata.com/v2.0/api/ +[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/api/ diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 30c449f7dd910..4df2f7dc86a5e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -6,12 +6,13 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -39,9 +40,9 @@ type InfluxDBV2Listener struct { port int tlsint.ServerConfig - MaxBodySize internal.Size `toml:"max_body_size"` - Token string `toml:"token"` - BucketTag string `toml:"bucket_tag"` + MaxBodySize config.Size `toml:"max_body_size"` + Token string `toml:"token"` + BucketTag string `toml:"bucket_tag"` timeFunc influx.TimeFunc @@ -134,8 +135,8 @@ func (h *InfluxDBV2Listener) Init() error { h.authFailures = selfstat.Register("influxdb_v2_listener", "auth_failures", tags) h.routes() - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } return nil @@ -210,7 +211,9 @@ func (h *InfluxDBV2Listener) handleReady() http.HandlerFunc { "started": h.startTime.Format(time.RFC3339Nano), "status": "ready", "up": h.timeFunc().Sub(h.startTime).String()}) - res.Write(b) + if _, err := res.Write(b); err != nil { + h.Log.Debugf("error writing in handle-ready: %v", err) + } } } @@ -225,22 +228,26 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { return func(res http.ResponseWriter, req *http.Request) { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. 
- if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res, h.MaxBodySize.Size) + if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res, int64(h.MaxBodySize)); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } bucket := req.URL.Query().Get("bucket") body := req.Body - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) // Handle gzip request bodies if req.Header.Get("Content-Encoding") == "gzip" { var err error body, err = gzip.NewReader(body) if err != nil { h.Log.Debugf("Error decompressing request body: %v", err.Error()) - badRequest(res, Invalid, err.Error()) + if err := badRequest(res, Invalid, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } defer body.Close() @@ -249,10 +256,12 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { var readErr error var bytes []byte //body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size) - bytes, readErr = ioutil.ReadAll(body) + bytes, readErr = io.ReadAll(body) if readErr != nil { h.Log.Debugf("Error parsing the request body: %v", readErr.Error()) - badRequest(res, InternalError, readErr.Error()) + if err := badRequest(res, InternalError, readErr.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } metricHandler := influx.NewMetricHandler() @@ -272,7 +281,9 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { if err != influx.EOF && err != nil { h.Log.Debugf("Error parsing the request body: %v", err.Error()) - badRequest(res, Invalid, err.Error()) + if err := badRequest(res, Invalid, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } @@ -290,7 +301,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { } } -func tooLarge(res http.ResponseWriter, maxLength int64) { +func tooLarge(res http.ResponseWriter, maxLength int64) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Error", "http: request body too large") res.WriteHeader(http.StatusRequestEntityTooLarge) @@ -298,10 +309,11 @@ func tooLarge(res http.ResponseWriter, maxLength int64) { "code": fmt.Sprint(Invalid), "message": "http: request body too large", "maxLength": fmt.Sprint(maxLength)}) - res.Write(b) + _, err := res.Write(b) + return err } -func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) { +func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) error { res.Header().Set("Content-Type", "application/json") if errString == "" { errString = "http: bad request" @@ -314,7 +326,8 @@ func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) "op": "", "err": errString, }) - res.Write(b) + _, err := res.Write(b) + return err } func getPrecisionMultiplier(precision string) time.Duration { diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go index e1e2c7090b359..219d59a93863e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" ) @@ -20,9 +20,7 @@ func newListener() *InfluxDBV2Listener { 
acc: &testutil.NopAccumulator{}, bytesRecv: selfstat.Register("influxdb_v2_listener", "bytes_received", map[string]string{}), writesServed: selfstat.Register("influxdb_v2_listener", "writes_served", map[string]string{}), - MaxBodySize: internal.Size{ - Size: defaultMaxBodySize, - }, + MaxBodySize: config.Size(defaultMaxBodySize), } return listener } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index 2a80bb4d351e6..4338f34f89567 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -5,18 +5,20 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "runtime" "strconv" "sync" "testing" "time" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) const ( @@ -115,7 +117,7 @@ func TestWriteSecureNoClientAuth(t *testing.T) { // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -130,7 +132,7 @@ func TestWriteSecureWithClientAuth(t *testing.T) { // post single message to listener resp, err := getSecureClient().Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -149,7 +151,7 @@ func TestWriteTokenAuth(t *testing.T) { req.Header.Set("Authorization", fmt.Sprintf("Token %s", token)) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -167,7 +169,7 @@ func TestWriteKeepBucket(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -179,7 +181,7 @@ func TestWriteKeepBucket(t *testing.T) { // post single message to listener with a database tag in it already. It should be clobbered. 
resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgWithDB))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -191,7 +193,7 @@ func TestWriteKeepBucket(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -217,7 +219,7 @@ func TestWriteNoNewline(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -238,7 +240,7 @@ func TestAllOrNothing(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testPartial))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -257,7 +259,7 @@ func TestWriteMaxLineSizeIncrease(t *testing.T) { // Post a gigantic metric to the listener and verify that it writes OK this time: resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -265,7 +267,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { listener := &InfluxDBV2Listener{ Log: testutil.Logger{}, ServiceAddress: "localhost:0", - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), timeFunc: time.Now, } @@ -276,7 +278,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@ -296,7 +298,7 @@ func TestWriteLargeLine(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) //todo: with the new parser, long lines aren't a problem. Do we need to skip them? 
//require.EqualValues(t, 400, resp.StatusCode) @@ -363,7 +365,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer(data)) @@ -373,6 +375,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -406,15 +409,21 @@ func TestWriteHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) @@ -431,7 +440,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 404, resp.StatusCode) } @@ -446,7 +455,7 @@ func TestWriteInvalid(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -461,7 +470,7 @@ func TestWriteEmpty(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -479,10 +488,10 @@ func TestReady(t *testing.T) { resp, err := http.Get(createURL(listener, "http", "/api/v2/ready", "")) require.NoError(t, err) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(bodyBytes), "\"status\":\"ready\"") - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } @@ -498,7 +507,7 @@ func TestWriteWithPrecision(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -523,7 +532,7 @@ func TestWriteWithPrecisionNoTimestamp(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) 
diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md
new file mode 100644
index 0000000000000..009c8cafc1cfb
--- /dev/null
+++ b/plugins/inputs/intel_powerstat/README.md
@@ -0,0 +1,204 @@
+# Intel PowerStat Input Plugin
+This input plugin monitors power statistics on Intel-based platforms and assumes the presence of a Linux-based OS.
+
+The main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform-level metrics.
+A key source of platform telemetry is the power domain, which enables MANO/Monitoring & Analytics systems
+to take preventive or corrective actions based on platform busyness, CPU temperature, actual CPU utilization and power statistics.
+
+### Configuration:
+```toml
+# Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization.
+[[inputs.intel_powerstat]]
+  ## All global metrics are always collected by Intel PowerStat plugin.
+  ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array.
+  ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level
+  ## telemetry will be exposed by Intel PowerStat plugin.
+  ## Supported options:
+  ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"
+  # cpu_metrics = []
+```
+### Example: Configuration with no per-CPU telemetry
+This configuration collects only global metrics (processor package specific); no per-CPU metrics are gathered:
+```toml
+[[inputs.intel_powerstat]]
+  cpu_metrics = []
+```
+
+### Example: Configuration with no per-CPU telemetry - equivalent case
+This configuration collects only global metrics (processor package specific); no per-CPU metrics are gathered:
+```toml
+[[inputs.intel_powerstat]]
+```
+
+### Example: Configuration for CPU Temperature and Frequency only
+This configuration collects global metrics plus a subset of per-CPU metrics (CPU temperature and current frequency):
+```toml
+[[inputs.intel_powerstat]]
+  cpu_metrics = ["cpu_frequency", "cpu_temperature"]
+```
+
+### Example: Configuration with all available metrics
+This configuration collects global metrics and all per-CPU metrics:
+```toml
+[[inputs.intel_powerstat]]
+  cpu_metrics = ["cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"]
+```
+
+### SW Dependencies:
+The plugin relies on Linux kernel modules that expose specific metrics over the `sysfs` or `devfs` interfaces.
+The plugin expects the following dependencies:
+- _intel-rapl_ module which exposes Intel Runtime Power Limiting metrics over `sysfs` (`/sys/devices/virtual/powercap/intel-rapl`),
+- _msr_ kernel module that provides access to processor model specific registers over `devfs` (`/dev/cpu/cpu%d/msr`),
+- _cpufreq_ kernel module - which exposes per-CPU Frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`).
+
+The minimum kernel version required to satisfy all of these dependencies is 3.13.
+
+Make sure the kernel modules are loaded and running; you may have to enable them manually with `modprobe`.
+The exact commands to execute are:
+```
+sudo modprobe cpufreq-stats
+sudo modprobe msr
+sudo modprobe intel_rapl
+```
+
+**Telegraf with the Intel PowerStat plugin enabled may require root access to read model specific registers (MSRs)**
+to retrieve the data needed to calculate the most critical per-CPU metrics:
+- `cpu_busy_frequency_mhz`
+- `cpu_temperature_celsius`
+- `cpu_c1_state_residency_percent`
+- `cpu_c6_state_residency_percent`
+- `cpu_busy_cycles_percent`
+
+Exposing the other Intel PowerStat metrics may or may not require root access, depending on the OS type and configuration.
+
+### HW Dependencies:
+Specific metrics require certain processor features to be present, otherwise the Intel PowerStat plugin won't be able to
+read them. On a Linux-based OS, supported processor features can be detected by reading the `/proc/cpuinfo` file.
+The plugin assumes these crucial properties are the same for all CPU cores in the system.
+The following processor properties are examined in more detail in this section:
+processor _cpu family_, _model_ and _flags_.
+The following processor properties are required by the plugin:
+- Processor _cpu family_ must be Intel (0x6), since the plugin relies on Intel-specific
+model specific registers for all features
+- The following processor flags shall be present:
+  - "_msr_" shall be present for the plugin to read platform data from processor model specific registers and collect
+  the following metrics: _powerstat_core.cpu_temperature_, _powerstat_core.cpu_busy_frequency_,
+  _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_, _powerstat_core.cpu_c6_state_residency_
+  - "_aperfmperf_" shall be present to collect the following metrics: _powerstat_core.cpu_busy_frequency_,
+  _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_
+  - "_dts_" shall be present to collect _powerstat_core.cpu_temperature_
+- Processor _Model number_ must be one of the following values for the plugin to read _powerstat_core.cpu_c1_state_residency_
+and _powerstat_core.cpu_c6_state_residency_ metrics:
+
+| Model number | Processor name |
+|-----|-------------|
+| 0x37 | Intel Atom® Bay Trail |
+| 0x4D | Intel Atom® Avoton |
+| 0x5C | Intel Atom® Apollo Lake |
+| 0x5F | Intel Atom® Denverton |
+| 0x7A | Intel Atom® Goldmont |
+| 0x4C | Intel Atom® Airmont |
+| 0x86 | Intel Atom® Jacobsville |
+| 0x96 | Intel Atom® Elkhart Lake |
+| 0x9C | Intel Atom® Jasper Lake |
+| 0x1A | Intel Nehalem-EP |
+| 0x1E | Intel Nehalem |
+| 0x1F | Intel Nehalem-G |
+| 0x2E | Intel Nehalem-EX |
+| 0x25 | Intel Westmere |
+| 0x2C | Intel Westmere-EP |
+| 0x2F | Intel Westmere-EX |
+| 0x2A | Intel Sandybridge |
+| 0x2D | Intel Sandybridge-X |
+| 0x3A | Intel Ivybridge |
+| 0x3E | Intel Ivybridge-X |
+| 0x4E | Intel Atom® Silvermont-MID |
+| 0x5E | Intel Skylake |
+| 0x55 | Intel Skylake-X |
+| 0x8E | Intel Kabylake-L |
+| 0x9E | Intel Kabylake |
+| 0x6A | Intel Icelake-X |
+| 0x6C | Intel Icelake-D |
+| 0x7D | Intel Icelake |
+| 0x7E | Intel Icelake-L |
+| 0x9D | Intel Icelake-NNPI |
+| 0x3C | Intel Haswell |
+| 0x3F | Intel Haswell-X |
+| 0x45 | Intel Haswell-L |
+| 0x46 | Intel Haswell-G |
+| 0x3D | Intel Broadwell |
+| 0x47 | Intel Broadwell-G |
+| 0x4F | Intel Broadwell-X |
+| 0x56 | Intel Broadwell-D |
+| 0x66 | Intel Cannonlake-L |
+| 0x57 | Intel Xeon® PHI Knights Landing |
+| 0x85 | Intel Xeon® PHI Knights Mill |
+| 0xA5 | Intel CometLake |
+| 0xA6 | Intel CometLake-L |
+| 0x8F | Intel Sapphire Rapids X |
+| 0x8C | Intel TigerLake-L |
+| 0x8D | Intel TigerLake |
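+
+As a convenience, the checks in this section can be automated. The following is a minimal, illustrative Go sketch (not part of the plugin) that verifies whether the kernel interfaces and CPU flags listed above are visible on the current host; the paths and flag names are taken directly from the lists in this README:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+func main() {
+	// Interfaces exposed by the required kernel modules (see SW Dependencies).
+	paths := []string{
+		"/sys/devices/virtual/powercap/intel-rapl",              // intel-rapl
+		"/dev/cpu/0/msr",                                        // msr
+		"/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", // cpufreq
+	}
+	for _, p := range paths {
+		if _, err := os.Stat(p); err != nil {
+			fmt.Printf("missing: %s (%v)\n", p, err)
+		} else {
+			fmt.Printf("ok: %s\n", p)
+		}
+	}
+
+	// Crude flag check: /proc/cpuinfo lists space-separated feature flags.
+	data, err := os.ReadFile("/proc/cpuinfo")
+	if err != nil {
+		fmt.Println("cannot read /proc/cpuinfo:", err)
+		return
+	}
+	for _, flag := range []string{"msr", "aperfmperf", "dts"} {
+		if strings.Contains(string(data), " "+flag+" ") {
+			fmt.Printf("flag %s: present\n", flag)
+		} else {
+			fmt.Printf("flag %s: not found\n", flag)
+		}
+	}
+}
+```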
+
+### Metrics
+All metrics are collected by the Intel PowerStat plugin at fixed intervals.
+Metrics that report processor C-state residency or power are calculated over the elapsed interval.
+When starting to measure, the plugin skips the first iteration of any metric that is based on a delta from a previous value.
+
+**The following measurements are supported by Intel PowerStat plugin:**
+- powerstat_core
+
+  - The following tags are returned by the plugin with powerstat_core measurements:
+
+    | Tag | Description |
+    |-----|-------------|
+    | `package_id` | ID of platform package/socket |
+    | `core_id` | ID of physical processor core |
+    | `cpu_id` | ID of logical processor core |
+    Measurement powerstat_core metrics are collected per CPU (cpu_id is the key),
+    while the core_id and package_id tags carry additional topology information.
+
+  - Available metrics for powerstat_core measurement
+
+    | Metric name (field) | Description | Units |
+    |-----|-------------|-----|
+    | `cpu_frequency_mhz` | Current operational frequency of CPU Core | MHz |
+    | `cpu_busy_frequency_mhz` | CPU Core Busy Frequency measured as frequency adjusted to CPU Core busy cycles | MHz |
+    | `cpu_temperature_celsius` | Current temperature of CPU Core | Celsius degrees |
+    | `cpu_c1_state_residency_percent` | Percentage of time that CPU Core spent in C1 Core residency state | % |
+    | `cpu_c6_state_residency_percent` | Percentage of time that CPU Core spent in C6 Core residency state | % |
+    | `cpu_busy_cycles_percent` | CPU Core Busy cycles as a ratio of Cycles spent in C0 state residency to all cycles executed by CPU Core | % |
+
+- powerstat_package
+
+  - The following tags are returned by the plugin with powerstat_package measurements:
+
+    | Tag | Description |
+    |-----|-------------|
+    | `package_id` | ID of platform package/socket |
+    Measurement powerstat_package metrics are collected per processor package - the _package_id_ tag indicates which
+    package the metric refers to.
+
+  - Available metrics for powerstat_package measurement
+
+    | Metric name (field) | Description | Units |
+    |-----|-------------|-----|
+    | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts |
+    | `current_power_consumption_watts` | Current power consumption of processor package | Watts |
+    | `current_dram_power_consumption_watts` | Current power consumption of processor package DRAM subsystem | Watts |
+
+### Example Output:
+
+```
+powerstat_package,host=ubuntu,package_id=0 thermal_design_power_watts=160 1606494744000000000
+powerstat_package,host=ubuntu,package_id=0 current_power_consumption_watts=35 1606494744000000000
+powerstat_package,host=ubuntu,package_id=0 current_dram_power_consumption_watts=13.94 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_frequency_mhz=1200.29 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_temperature_celsius=34i 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c6_state_residency_percent=92.52 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_busy_cycles_percent=0.8 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c1_state_residency_percent=6.68 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_busy_frequency_mhz=1213.24 1606494744000000000
+```
diff --git a/plugins/inputs/intel_powerstat/dto.go b/plugins/inputs/intel_powerstat/dto.go
new file mode 100644
index 0000000000000..eb3da0bc269f7
--- /dev/null
+++ b/plugins/inputs/intel_powerstat/dto.go
@@ -0,0 +1,37 @@
+package intel_powerstat
+
+type msrData struct {
+	mperf                 uint64
+	aperf                 uint64
+	timeStampCounter      uint64
+	c3                    uint64
+	c6                    uint64
+	c7                    uint64
+	throttleTemp          uint64
+	temp                  uint64
+	mperfDelta            uint64
+	aperfDelta            uint64
+	timeStampCounterDelta uint64
+	c3Delta               uint64
+	c6Delta               uint64
+	c7Delta               uint64
+	readDate              int64
+}
+
+type raplData struct {
+	dramCurrentEnergy   float64
+	socketCurrentEnergy float64
+	socketEnergy        float64
+	dramEnergy          float64
+	readDate            int64
+}
+
+type cpuInfo struct {
+	physicalID string
+	coreID     string
+	cpuID      string
+	vendorID   string
+	cpuFamily  string
+	model      string
+	flags      string
+}
diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go
new file mode 100644
index 0000000000000..c69dea89f4e26
--- /dev/null
+++ b/plugins/inputs/intel_powerstat/file.go
@@ -0,0 +1,154 @@
+//go:build linux
+// +build linux
+
+package intel_powerstat
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// fileService is responsible for handling operations on files.
+type fileService interface {
+	getCPUInfoStats() (map[string]*cpuInfo, error)
+	getStringsMatchingPatternOnPath(path string) ([]string, error)
+	readFile(path string) ([]byte, error)
+	readFileToFloat64(reader io.Reader) (float64, int64, error)
+	readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error)
+}
+
+type fileServiceImpl struct {
+}
+
+// getCPUInfoStats retrieves basic information about CPU from /proc/cpuinfo.
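+// For reference, an abbreviated /proc/cpuinfo stanza has the following shape
+// (values are illustrative); the regular expressions below match these lines:
+//
+//	processor	: 0
+//	vendor_id	: GenuineIntel
+//	cpu family	: 6
+//	model		: 85
+//	physical id	: 0
+//	core id		: 0
+//	flags		: fpu vme de pse msr ... aperfmperf ... dts ...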
+func (fs *fileServiceImpl) getCPUInfoStats() (map[string]*cpuInfo, error) {
+	path := "/proc/cpuinfo"
+	cpuInfoFile, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("error while reading %s, err: %v", path, err)
+	}
+	defer cpuInfoFile.Close()
+
+	scanner := bufio.NewScanner(cpuInfoFile)
+
+	processorRegexp := regexp.MustCompile(`^processor\t+:\s([0-9]+)\n*$`)
+	physicalIDRegexp := regexp.MustCompile(`^physical id\t+:\s([0-9]+)\n*$`)
+	coreIDRegexp := regexp.MustCompile(`^core id\t+:\s([0-9]+)\n*$`)
+	vendorIDRegexp := regexp.MustCompile(`^vendor_id\t+:\s([a-zA-Z]+)\n*$`)
+	cpuFamilyRegexp := regexp.MustCompile(`^cpu\sfamily\t+:\s([0-9]+)\n*$`)
+	modelRegexp := regexp.MustCompile(`^model\t+:\s([0-9]+)\n*$`)
+	flagsRegexp := regexp.MustCompile(`^flags\t+:\s(.+)\n*$`)
+
+	stats := make(map[string]*cpuInfo)
+	currentInfo := &cpuInfo{}
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		processorRes := processorRegexp.FindStringSubmatch(line)
+		if len(processorRes) > 1 {
+			currentInfo = &cpuInfo{
+				cpuID: processorRes[1],
+			}
+		}
+
+		vendorIDRes := vendorIDRegexp.FindStringSubmatch(line)
+		if len(vendorIDRes) > 1 {
+			currentInfo.vendorID = vendorIDRes[1]
+		}
+
+		physicalIDRes := physicalIDRegexp.FindStringSubmatch(line)
+		if len(physicalIDRes) > 1 {
+			currentInfo.physicalID = physicalIDRes[1]
+		}
+
+		coreIDRes := coreIDRegexp.FindStringSubmatch(line)
+		if len(coreIDRes) > 1 {
+			currentInfo.coreID = coreIDRes[1]
+		}
+
+		cpuFamilyRes := cpuFamilyRegexp.FindStringSubmatch(line)
+		if len(cpuFamilyRes) > 1 {
+			currentInfo.cpuFamily = cpuFamilyRes[1]
+		}
+
+		modelRes := modelRegexp.FindStringSubmatch(line)
+		if len(modelRes) > 1 {
+			currentInfo.model = modelRes[1]
+		}
+
+		flagsRes := flagsRegexp.FindStringSubmatch(line)
+		if len(flagsRes) > 1 {
+			currentInfo.flags = flagsRes[1]
+
+			// Flags is the last value we have to acquire, so currentInfo is added to the map.
+			stats[currentInfo.cpuID] = currentInfo
+		}
+	}
+
+	return stats, nil
+}
+
+// getStringsMatchingPatternOnPath looks for filenames and directory names on path matching the given glob pattern.
+// It ignores file system errors such as I/O errors reading directories. The only possible returned error
+// is ErrBadPattern, when the pattern is malformed.
+func (fs *fileServiceImpl) getStringsMatchingPatternOnPath(path string) ([]string, error) {
+	return filepath.Glob(path)
+}
+
+// readFile reads the file at the given path and returns its content as bytes.
+func (fs *fileServiceImpl) readFile(path string) ([]byte, error) {
+	out, err := os.ReadFile(path)
+	if err != nil {
+		return make([]byte, 0), err
+	}
+	return out, nil
+}
+
+// readFileToFloat64 reads from the given reader and tries to parse the content to float64.
+func (fs *fileServiceImpl) readFileToFloat64(reader io.Reader) (float64, int64, error) {
+	read, err := io.ReadAll(reader)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	readDate := time.Now().UnixNano()
+
+	// Remove the trailing newline character
+	trimmedString := strings.TrimRight(string(read), "\n")
+	// Parse the result to float64
+	parsedValue, err := strconv.ParseFloat(trimmedString, 64)
+	if err != nil {
+		return 0, 0, fmt.Errorf("error parsing string to float for %s", trimmedString)
+	}
+
+	return parsedValue, readDate, nil
+}
+
+// readFileAtOffsetToUint64 reads 8 bytes from the passed file at the given offset.
+func (fs *fileServiceImpl) readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) { + buffer := make([]byte, 8) + + if offset == 0 { + return 0, fmt.Errorf("file offset %d should not be 0", offset) + } + + _, err := reader.ReadAt(buffer, offset) + if err != nil { + return 0, fmt.Errorf("error on reading file at offset %d, err: %v", offset, err) + } + + return binary.LittleEndian.Uint64(buffer), nil +} + +func newFileService() *fileServiceImpl { + return &fileServiceImpl{} +} diff --git a/plugins/inputs/intel_powerstat/file_mock_test.go b/plugins/inputs/intel_powerstat/file_mock_test.go new file mode 100644 index 0000000000000..ab4bd8c57baa6 --- /dev/null +++ b/plugins/inputs/intel_powerstat/file_mock_test.go @@ -0,0 +1,132 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. + +package intel_powerstat + +import ( + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// mockFileService is an autogenerated mock type for the fileService type +type mockFileService struct { + mock.Mock +} + +// getCPUInfoStats provides a mock function with given fields: +func (_m *mockFileService) getCPUInfoStats() (map[string]*cpuInfo, error) { + ret := _m.Called() + + var r0 map[string]*cpuInfo + if rf, ok := ret.Get(0).(func() map[string]*cpuInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*cpuInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getStringsMatchingPatternOnPath provides a mock function with given fields: path +func (_m *mockFileService) getStringsMatchingPatternOnPath(path string) ([]string, error) { + ret := _m.Called(path) + + var r0 []string + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFile provides a mock function with given fields: path +func (_m *mockFileService) readFile(path string) ([]byte, error) { + ret := _m.Called(path) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFileAtOffsetToUint64 provides a mock function with given fields: reader, offset +func (_m *mockFileService) readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) { + ret := _m.Called(reader, offset) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(io.ReaderAt, int64) uint64); ok { + r0 = rf(reader, offset) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(io.ReaderAt, int64) error); ok { + r1 = rf(reader, offset) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFileToFloat64 provides a mock function with given fields: reader +func (_m *mockFileService) readFileToFloat64(reader io.Reader) (float64, int64, error) { + ret := _m.Called(reader) + + var r0 float64 + if rf, ok := ret.Get(0).(func(io.Reader) float64); ok { + r0 = rf(reader) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 int64 + if rf, ok := ret.Get(1).(func(io.Reader) int64); ok { + r1 = rf(reader) + } else { + r1 = ret.Get(1).(int64) + } + + var r2 error + if rf, ok := ret.Get(2).(func(io.Reader) 
error); ok {
+		r2 = rf(reader)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}
diff --git a/plugins/inputs/intel_powerstat/intel_powerstat.go b/plugins/inputs/intel_powerstat/intel_powerstat.go
new file mode 100644
index 0000000000000..181e7642da4b8
--- /dev/null
+++ b/plugins/inputs/intel_powerstat/intel_powerstat.go
@@ -0,0 +1,487 @@
+//go:build linux
+// +build linux
+
+package intel_powerstat
+
+import (
+	"fmt"
+	"math/big"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const (
+	cpuFrequency         = "cpu_frequency"
+	cpuBusyFrequency     = "cpu_busy_frequency"
+	cpuTemperature       = "cpu_temperature"
+	cpuC1StateResidency  = "cpu_c1_state_residency"
+	cpuC6StateResidency  = "cpu_c6_state_residency"
+	cpuBusyCycles        = "cpu_busy_cycles"
+	percentageMultiplier = 100
+)
+
+// PowerStat plugin enables monitoring of platform metrics (power, TDP) and core metrics like temperature, power and utilization.
+type PowerStat struct {
+	CPUMetrics []string        `toml:"cpu_metrics"`
+	Log        telegraf.Logger `toml:"-"`
+
+	fs   fileService
+	rapl raplService
+	msr  msrService
+
+	cpuFrequency        bool
+	cpuBusyFrequency    bool
+	cpuTemperature      bool
+	cpuC1StateResidency bool
+	cpuC6StateResidency bool
+	cpuBusyCycles       bool
+	cpuInfo             map[string]*cpuInfo
+	skipFirstIteration  bool
+}
+
+// Description returns a one-sentence description of the plugin.
+func (p *PowerStat) Description() string {
+	return `Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization.`
+}
+
+// SampleConfig returns the default configuration of the plugin.
+func (p *PowerStat) SampleConfig() string {
+	return `
+  ## All global metrics are always collected by Intel PowerStat plugin.
+  ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array.
+  ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level
+  ## telemetry will be exposed by Intel PowerStat plugin.
+  ## Supported options:
+  ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"
+  # cpu_metrics = []
+`
+}
+
+// Init performs one-time setup of the plugin.
+func (p *PowerStat) Init() error {
+	p.parseCPUMetricsConfig()
+	err := p.verifyProcessor()
+	if err != nil {
+		return err
+	}
+	// Initialize the MSR service only when there is at least one core metric enabled.
+	if p.cpuFrequency || p.cpuBusyFrequency || p.cpuTemperature || p.cpuC1StateResidency ||
+		p.cpuC6StateResidency || p.cpuBusyCycles {
+		p.msr = newMsrServiceWithFs(p.Log, p.fs)
+	}
+	p.rapl = newRaplServiceWithFs(p.Log, p.fs)
+
+	return nil
+}
+
+// Gather takes in an accumulator and adds the metrics that the Input gathers.
+func (p *PowerStat) Gather(acc telegraf.Accumulator) error {
+	p.addGlobalMetrics(acc)
+
+	if p.areCoreMetricsEnabled() {
+		p.addPerCoreMetrics(acc)
+	}
+
+	// Most metrics are based on deltas between gathers, so their first iteration was skipped;
+	// from now on, deltas are available and the metrics can be reported.
+	p.skipFirstIteration = false
+
+	return nil
+}
+
+func (p *PowerStat) addGlobalMetrics(acc telegraf.Accumulator) {
+	// Prepare RAPL data on each gather, because the rapl kernel module can be disabled at runtime
+	p.rapl.initializeRaplData()
+
+	for socketID := range p.rapl.getRaplData() {
+		err := p.rapl.retrieveAndCalculateData(socketID)
+		if err != nil {
+			// In case of an error, skip calculating metrics for this socket
+			p.Log.Errorf("error fetching rapl data for socket %s, err: %v", socketID, err)
+			continue
+		}
+		p.addThermalDesignPowerMetric(socketID, acc)
+		if p.skipFirstIteration {
+			continue
+		}
+		p.addCurrentSocketPowerConsumption(socketID, acc)
+		p.addCurrentDramPowerConsumption(socketID, acc)
+	}
+}
+
+func (p *PowerStat) addThermalDesignPowerMetric(socketID string, acc telegraf.Accumulator) {
+	maxPower, err := p.rapl.getConstraintMaxPowerWatts(socketID)
+	if err != nil {
+		p.Log.Errorf("error while retrieving TDP of the socket %s, err: %v", socketID, err)
+		return
+	}
+
+	tags := map[string]string{
+		"package_id": socketID,
+	}
+
+	fields := map[string]interface{}{
+		"thermal_design_power_watts": roundFloatToNearestTwoDecimalPlaces(maxPower),
+	}
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+
+func (p *PowerStat) addCurrentSocketPowerConsumption(socketID string, acc telegraf.Accumulator) {
+	tags := map[string]string{
+		"package_id": socketID,
+	}
+
+	fields := map[string]interface{}{
+		"current_power_consumption_watts": roundFloatToNearestTwoDecimalPlaces(p.rapl.getRaplData()[socketID].socketCurrentEnergy),
+	}
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+
+func (p *PowerStat) addCurrentDramPowerConsumption(socketID string, acc telegraf.Accumulator) {
+	tags := map[string]string{
+		"package_id": socketID,
+	}
+
+	fields := map[string]interface{}{
+		"current_dram_power_consumption_watts": roundFloatToNearestTwoDecimalPlaces(p.rapl.getRaplData()[socketID].dramCurrentEnergy),
+	}
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+
+func (p *PowerStat) addPerCoreMetrics(acc telegraf.Accumulator) {
+	var wg sync.WaitGroup
+	wg.Add(len(p.msr.getCPUCoresData()))
+
+	for cpuID := range p.msr.getCPUCoresData() {
+		go p.addMetricsForSingleCore(cpuID, acc, &wg)
+	}
+
+	wg.Wait()
+}
+
+func (p *PowerStat) addMetricsForSingleCore(cpuID string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	if p.cpuFrequency {
+		p.addCPUFrequencyMetric(cpuID, acc)
+	}
+
+	// Read data from MSR only if required
+	if p.cpuC1StateResidency || p.cpuC6StateResidency || p.cpuBusyCycles || p.cpuTemperature ||
+		p.cpuBusyFrequency {
+		err := p.msr.openAndReadMsr(cpuID)
+		if err != nil {
+			// In case of an error, exit the function. All metrics past this point are dependent on MSR.
+			p.Log.Debugf("error while reading msr: %v", err)
+			return
+		}
+	}
+
+	if p.cpuTemperature {
+		p.addCPUTemperatureMetric(cpuID, acc)
+	}
+
+	// The cpuBusyFrequency metric performs calculations now whose results are needed in the next plugin cycle.
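+	// Illustratively, with the fixture deltas used in this plugin's tests
+	// (aperfDelta=33866, mperfDelta=23515), the core ran at roughly
+	// 33866/23515 = 1.44x of its base frequency while busy;
+	// addCPUBusyFrequencyMetric scales the TSC-derived frequency by this
+	// ratio and divides by the time elapsed between the last two gathers.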
+ if p.cpuBusyFrequency { + p.addCPUBusyFrequencyMetric(cpuID, acc) + } + + if !p.skipFirstIteration { + if p.cpuC1StateResidency { + p.addCPUC1StateResidencyMetric(cpuID, acc) + } + + if p.cpuC6StateResidency { + p.addCPUC6StateResidencyMetric(cpuID, acc) + } + + if p.cpuBusyCycles { + p.addCPUBusyCyclesMetric(cpuID, acc) + } + } +} + +func (p *PowerStat) addCPUFrequencyMetric(cpuID string, acc telegraf.Accumulator) { + frequency, err := p.msr.retrieveCPUFrequencyForCore(cpuID) + + // In case of an error leave func + if err != nil { + p.Log.Debugf("error while reading file: %v", err) + return + } + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + + fields := map[string]interface{}{ + "cpu_frequency_mhz": roundFloatToNearestTwoDecimalPlaces(frequency), + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUTemperatureMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + temp := coresData[cpuID].throttleTemp - coresData[cpuID].temp + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_temperature_celsius": temp, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUBusyFrequencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + mperfDelta := coresData[cpuID].mperfDelta + // Avoid division by 0 + if mperfDelta == 0 { + p.Log.Errorf("mperf delta should not equal 0 on core %s", cpuID) + return + } + aperfMperf := float64(coresData[cpuID].aperfDelta) / float64(mperfDelta) + tsc := convertProcessorCyclesToHertz(coresData[cpuID].timeStampCounterDelta) + timeNow := time.Now().UnixNano() + interval := convertNanoSecondsToSeconds(timeNow - coresData[cpuID].readDate) + coresData[cpuID].readDate = timeNow + + if p.skipFirstIteration { + return + } + + if interval == 0 { + p.Log.Errorf("interval between last two Telegraf cycles is 0") + return + } + + busyMhzValue := roundFloatToNearestTwoDecimalPlaces(tsc * aperfMperf / interval) + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_busy_frequency_mhz": busyMhzValue, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUC1StateResidencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + timestampDeltaBig := new(big.Int).SetUint64(coresData[cpuID].timeStampCounterDelta) + // Avoid division by 0 + if timestampDeltaBig.Sign() < 1 { + p.Log.Errorf("timestamp delta value %v should not be lower than 1", timestampDeltaBig) + return + } + + // Since counter collection is not atomic it may happen that sum of C0, C1, C3, C6 and C7 + // is bigger value than TSC, in such case C1 residency shall be set to 0. 
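+	// Worked example with the illustrative fixture deltas used in this
+	// plugin's tests: tscDelta=13686000, mperfDelta=23515, c3Delta=20003,
+	// c6Delta=44518, c7Delta=20979 gives
+	// c1 = 13686000 - 23515 - 20003 - 44518 - 20979 = 13576985, so
+	// cpu_c1_state_residency_percent = 100 * 13576985 / 13686000 ≈ 99.2.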
+	// Operating on big.Int to avoid overflow
+	mperfDeltaBig := new(big.Int).SetUint64(coresData[cpuID].mperfDelta)
+	c3DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c3Delta)
+	c6DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c6Delta)
+	c7DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c7Delta)
+
+	c1Big := new(big.Int).Sub(timestampDeltaBig, mperfDeltaBig)
+	c1Big.Sub(c1Big, c3DeltaBig)
+	c1Big.Sub(c1Big, c6DeltaBig)
+	c1Big.Sub(c1Big, c7DeltaBig)
+
+	if c1Big.Sign() < 0 {
+		c1Big = c1Big.SetInt64(0)
+	}
+	c1Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(c1Big.Uint64()) / float64(timestampDeltaBig.Uint64()))
+
+	cpu := p.cpuInfo[cpuID]
+	tags := map[string]string{
+		"package_id": cpu.physicalID,
+		"core_id":    cpu.coreID,
+		"cpu_id":     cpu.cpuID,
+	}
+	fields := map[string]interface{}{
+		"cpu_c1_state_residency_percent": c1Value,
+	}
+
+	acc.AddGauge("powerstat_core", fields, tags)
+}
+
+func (p *PowerStat) addCPUC6StateResidencyMetric(cpuID string, acc telegraf.Accumulator) {
+	coresData := p.msr.getCPUCoresData()
+	// Avoid division by 0
+	if coresData[cpuID].timeStampCounterDelta == 0 {
+		p.Log.Errorf("timestamp counter on offset %d should not equal 0 on cpuID %s",
+			timestampCounterLocation, cpuID)
+		return
+	}
+	c6Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier *
+		float64(coresData[cpuID].c6Delta) / float64(coresData[cpuID].timeStampCounterDelta))
+
+	cpu := p.cpuInfo[cpuID]
+	tags := map[string]string{
+		"package_id": cpu.physicalID,
+		"core_id":    cpu.coreID,
+		"cpu_id":     cpu.cpuID,
+	}
+	fields := map[string]interface{}{
+		"cpu_c6_state_residency_percent": c6Value,
+	}
+
+	acc.AddGauge("powerstat_core", fields, tags)
+}
+
+func (p *PowerStat) addCPUBusyCyclesMetric(cpuID string, acc telegraf.Accumulator) {
+	coresData := p.msr.getCPUCoresData()
+	// Avoid division by 0
+	if coresData[cpuID].timeStampCounterDelta == 0 {
+		p.Log.Errorf("timestamp counter on offset %d should not equal 0 on cpuID %s",
+			timestampCounterLocation, cpuID)
+		return
+	}
+	busyCyclesValue := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier *
+		float64(coresData[cpuID].mperfDelta) / float64(coresData[cpuID].timeStampCounterDelta))
+	cpu := p.cpuInfo[cpuID]
+	tags := map[string]string{
+		"package_id": cpu.physicalID,
+		"core_id":    cpu.coreID,
+		"cpu_id":     cpu.cpuID,
+	}
+	fields := map[string]interface{}{
+		"cpu_busy_cycles_percent": busyCyclesValue,
+	}
+
+	acc.AddGauge("powerstat_core", fields, tags)
+}
+
+func (p *PowerStat) parseCPUMetricsConfig() {
+	if len(p.CPUMetrics) == 0 {
+		return
+	}
+
+	if contains(p.CPUMetrics, cpuFrequency) {
+		p.cpuFrequency = true
+	}
+
+	if contains(p.CPUMetrics, cpuC1StateResidency) {
+		p.cpuC1StateResidency = true
+	}
+
+	if contains(p.CPUMetrics, cpuC6StateResidency) {
+		p.cpuC6StateResidency = true
+	}
+
+	if contains(p.CPUMetrics, cpuBusyCycles) {
+		p.cpuBusyCycles = true
+	}
+
+	if contains(p.CPUMetrics, cpuBusyFrequency) {
+		p.cpuBusyFrequency = true
+	}
+
+	if contains(p.CPUMetrics, cpuTemperature) {
+		p.cpuTemperature = true
+	}
+}
+
+func (p *PowerStat) verifyProcessor() error {
+	allowedProcessorModelsForC1C6 := []int64{0x37, 0x4D, 0x5C, 0x5F, 0x7A, 0x4C, 0x86, 0x96, 0x9C,
+		0x1A, 0x1E, 0x1F, 0x2E, 0x25, 0x2C, 0x2F, 0x2A, 0x2D, 0x3A, 0x3E, 0x4E, 0x5E, 0x55, 0x8E,
+		0x9E, 0x6A, 0x6C, 0x7D, 0x7E, 0x9D, 0x3C, 0x3F, 0x45, 0x46, 0x3D, 0x47, 0x4F, 0x56,
+		0x66, 0x57, 0x85, 0xA5, 0xA6, 0x8F, 0x8C, 0x8D}
+	stats, err := p.fs.getCPUInfoStats()
+	if err != nil {
+		return err
+	}
+
+	p.cpuInfo = stats
+
+	// First
CPU is sufficient for verification. + firstCPU := p.cpuInfo["0"] + if firstCPU == nil { + return fmt.Errorf("first core not found while parsing /proc/cpuinfo") + } + + if firstCPU.vendorID != "GenuineIntel" || firstCPU.cpuFamily != "6" { + return fmt.Errorf("Intel processor not found, vendorId: %s", firstCPU.vendorID) + } + + if !contains(convertIntegerArrayToStringArray(allowedProcessorModelsForC1C6), firstCPU.model) { + p.cpuC1StateResidency = false + p.cpuC6StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "msr") { + p.cpuTemperature = false + p.cpuC6StateResidency = false + p.cpuBusyCycles = false + p.cpuBusyFrequency = false + p.cpuC1StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "aperfmperf") { + p.cpuBusyFrequency = false + p.cpuBusyCycles = false + p.cpuC1StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "dts") { + p.cpuTemperature = false + } + + return nil +} + +func contains(slice []string, str string) bool { + for _, v := range slice { + if v == str { + return true + } + } + + return false +} + +func (p *PowerStat) areCoreMetricsEnabled() bool { + return p.msr != nil && len(p.msr.getCPUCoresData()) > 0 +} + +// newPowerStat creates and returns PowerStat struct. +func newPowerStat(fs fileService) *PowerStat { + p := &PowerStat{ + cpuFrequency: false, + cpuC1StateResidency: false, + cpuC6StateResidency: false, + cpuBusyCycles: false, + cpuTemperature: false, + cpuBusyFrequency: false, + skipFirstIteration: true, + fs: fs, + } + + return p +} + +func init() { + inputs.Add("intel_powerstat", func() telegraf.Input { + return newPowerStat(newFileService()) + }) +} diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go new file mode 100644 index 0000000000000..256e64970094e --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_test.go b/plugins/inputs/intel_powerstat/intel_powerstat_test.go new file mode 100644 index 0000000000000..ce01e77997cdc --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat_test.go @@ -0,0 +1,495 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "errors" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestInitPlugin(t *testing.T) { + cores := []string{"cpu0", "cpu1", "cpu2", "cpu3"} + power, fsMock, _, _ := getPowerWithMockedServices() + + fsMock.On("getCPUInfoStats", mock.Anything). + Return(nil, errors.New("error getting cpu stats")).Once() + require.Error(t, power.Init()) + + fsMock.On("getCPUInfoStats", mock.Anything). + Return(make(map[string]*cpuInfo), nil).Once() + require.Error(t, power.Init()) + + fsMock.On("getCPUInfoStats", mock.Anything). + Return(map[string]*cpuInfo{"0": { + vendorID: "GenuineIntel", + cpuFamily: "test", + }}, nil).Once() + require.Error(t, power.Init()) + + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once(). + On("getCPUInfoStats", mock.Anything). + Return(map[string]*cpuInfo{"0": { + vendorID: "GenuineIntel", + cpuFamily: "6", + }}, nil) + // Verify MSR service initialization. 
+ power.cpuFrequency = true + require.NoError(t, power.Init()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(cores), len(power.msr.getCPUCoresData())) + + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(nil, errors.New("error during getStringsMatchingPatternOnPath")).Once() + + // In case of an error when fetching cpu cores plugin should proceed with execution. + require.NoError(t, power.Init()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, 0, len(power.msr.getCPUCoresData())) +} + +func TestParseCPUMetricsConfig(t *testing.T) { + power, _, _, _ := getPowerWithMockedServices() + disableCoreMetrics(power) + + power.CPUMetrics = []string{ + "cpu_frequency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", + "cpu_busy_frequency", + } + power.parseCPUMetricsConfig() + verifyCoreMetrics(t, power, true) + disableCoreMetrics(power) + verifyCoreMetrics(t, power, false) + + power.CPUMetrics = []string{} + power.parseCPUMetricsConfig() + + power.CPUMetrics = []string{"cpu_c6_state_residency", "#@$sdkjdfsdf3@", "1pu_c1_state_residency"} + power.parseCPUMetricsConfig() + require.Equal(t, false, power.cpuC1StateResidency) + require.Equal(t, true, power.cpuC6StateResidency) + disableCoreMetrics(power) + verifyCoreMetrics(t, power, false) + + power.CPUMetrics = []string{"#@$sdkjdfsdf3@", "1pu_c1_state_residency", "123"} + power.parseCPUMetricsConfig() + verifyCoreMetrics(t, power, false) +} + +func verifyCoreMetrics(t *testing.T, power *PowerStat, enabled bool) { + require.Equal(t, enabled, power.cpuFrequency) + require.Equal(t, enabled, power.cpuC1StateResidency) + require.Equal(t, enabled, power.cpuC6StateResidency) + require.Equal(t, enabled, power.cpuBusyCycles) + require.Equal(t, enabled, power.cpuBusyFrequency) + require.Equal(t, enabled, power.cpuTemperature) +} + +func TestGather(t *testing.T) { + var acc testutil.Accumulator + packageIDs := []string{"0", "1"} + coreIDs := []string{"0", "1", "2", "3"} + socketCurrentEnergy := 13213852.2 + dramCurrentEnergy := 784552.0 + preparedCPUData := getPreparedCPUData(coreIDs) + raplDataMap := prepareRaplDataMap(packageIDs, socketCurrentEnergy, dramCurrentEnergy) + + power, _, raplMock, msrMock := getPowerWithMockedServices() + prepareCPUInfo(power, coreIDs, packageIDs) + enableCoreMetrics(power) + power.skipFirstIteration = false + + raplMock.On("initializeRaplData", mock.Anything). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Times(len(raplDataMap)). + On("getConstraintMaxPowerWatts", mock.Anything).Return(546783852.3, nil) + msrMock.On("getCPUCoresData").Return(preparedCPUData). + On("openAndReadMsr", mock.Anything).Return(nil). + On("retrieveCPUFrequencyForCore", mock.Anything).Return(1200000.2, nil) + + require.NoError(t, power.Gather(&acc)) + // Number of global metrics : 3 + // Number of per core metrics : 6 + require.Equal(t, 3*len(packageIDs)+6*len(coreIDs), len(acc.GetTelegrafMetrics())) +} + +func TestAddGlobalMetricsNegative(t *testing.T) { + var acc testutil.Accumulator + socketCurrentEnergy := 13213852.2 + dramCurrentEnergy := 784552.0 + raplDataMap := prepareRaplDataMap([]string{"0", "1"}, socketCurrentEnergy, dramCurrentEnergy) + power, _, raplMock, _ := getPowerWithMockedServices() + power.skipFirstIteration = false + raplMock.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(raplDataMap).Once(). 
+ On("retrieveAndCalculateData", mock.Anything).Return(errors.New("error while calculating data")).Times(len(raplDataMap)) + + power.addGlobalMetrics(&acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + raplMock.AssertNumberOfCalls(t, "retrieveAndCalculateData", len(raplDataMap)) + + raplMock.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(make(map[string]*raplData)).Once() + + power.addGlobalMetrics(&acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + raplMock.AssertNotCalled(t, "retrieveAndCalculateData") + + raplMock.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Once(). + On("retrieveAndCalculateData", mock.Anything).Return(errors.New("error while calculating data")).Once(). + On("getConstraintMaxPowerWatts", mock.Anything).Return(12313851.5, nil).Twice() + + power.addGlobalMetrics(&acc) + require.Equal(t, 3, len(acc.GetTelegrafMetrics())) +} + +func TestAddGlobalMetricsPositive(t *testing.T) { + var acc testutil.Accumulator + socketCurrentEnergy := 3644574.4 + dramCurrentEnergy := 124234872.5 + raplDataMap := prepareRaplDataMap([]string{"0", "1"}, socketCurrentEnergy, dramCurrentEnergy) + maxPower := 546783852.9 + power, _, raplMock, _ := getPowerWithMockedServices() + power.skipFirstIteration = false + + raplMock.On("initializeRaplData", mock.Anything). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Times(len(raplDataMap)). + On("getConstraintMaxPowerWatts", mock.Anything).Return(maxPower, nil).Twice(). + On("getCurrentDramPowerConsumption", mock.Anything).Return(dramCurrentEnergy) + + power.addGlobalMetrics(&acc) + require.Equal(t, 6, len(acc.GetTelegrafMetrics())) + + expectedResults := getGlobalMetrics(maxPower, socketCurrentEnergy, dramCurrentEnergy) + for _, test := range expectedResults { + acc.AssertContainsTaggedFields(t, "powerstat_package", test.fields, test.tags) + } +} + +func TestAddMetricsForSingleCoreNegative(t *testing.T) { + var wg sync.WaitGroup + var acc testutil.Accumulator + core := "0" + power, _, _, msrMock := getPowerWithMockedServices() + + msrMock.On("openAndReadMsr", core).Return(errors.New("error reading MSR file")).Once() + + // Skip generating metric for CPU frequency. + power.cpuFrequency = false + + wg.Add(1) + power.addMetricsForSingleCore(core, &acc, &wg) + wg.Wait() + + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddCPUFrequencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "1" + coreID := "3" + packageID := "0" + frequency := 1200000.2 + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + + msrMock.On("retrieveCPUFrequencyForCore", mock.Anything). 
+ Return(float64(0), errors.New("error on reading file")).Once() + + power.addCPUFrequencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + + msrMock.On("retrieveCPUFrequencyForCore", mock.Anything).Return(frequency, nil).Once() + + power.addCPUFrequencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedFrequency := roundFloatToNearestTwoDecimalPlaces(frequency) + expectedMetric := getPowerCoreMetric("cpu_frequency_mhz", expectedFrequency, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) +} + +func TestAddCoreCPUTemperatureMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + preparedData := getPreparedCPUData([]string{cpuID}) + expectedTemp := preparedData[cpuID].throttleTemp - preparedData[cpuID].temp + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + + msrMock.On("getCPUCoresData").Return(preparedData).Once() + power.addCPUTemperatureMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_temperature_celsius", expectedTemp, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) +} + +func TestAddC6StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + expectedC6 := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(preparedData[cpuID].c6Delta) / float64(preparedData[cpuID].timeStampCounterDelta)) + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUC6StateResidencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c6_state_residency_percent", expectedC6, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + + power.addCPUC6StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddProcessorBusyCyclesMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + expectedBusyCycles := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(preparedData[cpuID].mperfDelta) / + float64(preparedData[cpuID].timeStampCounterDelta)) + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUBusyCyclesMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_busy_cycles_percent", expectedBusyCycles, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + power.addCPUBusyCyclesMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddProcessorBusyFrequencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + 
coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + power.skipFirstIteration = false + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUBusyFrequencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + acc.ClearMetrics() + preparedData[cpuID].mperfDelta = 0 + power.addCPUBusyFrequencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddC1StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + c1 := preparedData[cpuID].timeStampCounterDelta - preparedData[cpuID].mperfDelta - preparedData[cpuID].c3Delta - + preparedData[cpuID].c6Delta - preparedData[cpuID].c7Delta + expectedC1 := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(c1) / float64(preparedData[cpuID].timeStampCounterDelta)) + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + + power.addCPUC1StateResidencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c1_state_residency_percent", expectedC1, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + power.addCPUC1StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddThermalDesignPowerMetric(t *testing.T) { + var acc testutil.Accumulator + sockets := []string{"0"} + maxPower := 195720672.1 + power, _, raplMock, _ := getPowerWithMockedServices() + + raplMock.On("getConstraintMaxPowerWatts", mock.Anything). + Return(float64(0), errors.New("getConstraintMaxPowerWatts error")).Once(). 
+ On("getConstraintMaxPowerWatts", mock.Anything).Return(maxPower, nil).Once() + + power.addThermalDesignPowerMetric(sockets[0], &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + + power.addThermalDesignPowerMetric(sockets[0], &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedTDP := roundFloatToNearestTwoDecimalPlaces(maxPower) + expectedMetric := getPowerGlobalMetric("thermal_design_power_watts", expectedTDP, sockets[0]) + acc.AssertContainsTaggedFields(t, "powerstat_package", expectedMetric.fields, expectedMetric.tags) +} + +func getPreparedCPUData(cores []string) map[string]*msrData { + msrDataMap := make(map[string]*msrData) + + for _, core := range cores { + msrDataMap[core] = &msrData{ + mperf: 43079, + aperf: 82001, + timeStampCounter: 15514, + c3: 52829, + c6: 86930, + c7: 25340, + throttleTemp: 88150, + temp: 40827, + mperfDelta: 23515, + aperfDelta: 33866, + timeStampCounterDelta: 13686000, + c3Delta: 20003, + c6Delta: 44518, + c7Delta: 20979, + } + } + + return msrDataMap +} + +func getGlobalMetrics(maxPower float64, socketCurrentEnergy float64, dramCurrentEnergy float64) []struct { + fields map[string]interface{} + tags map[string]string +} { + return []struct { + fields map[string]interface{} + tags map[string]string + }{ + getPowerGlobalMetric("thermal_design_power_watts", roundFloatToNearestTwoDecimalPlaces(maxPower), "0"), + getPowerGlobalMetric("thermal_design_power_watts", roundFloatToNearestTwoDecimalPlaces(maxPower), "1"), + getPowerGlobalMetric("current_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(socketCurrentEnergy), "0"), + getPowerGlobalMetric("current_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(socketCurrentEnergy), "1"), + getPowerGlobalMetric("current_dram_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(dramCurrentEnergy), "0"), + getPowerGlobalMetric("current_dram_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(dramCurrentEnergy), "1"), + } +} + +func getPowerCoreMetric(name string, value interface{}, coreID string, packageID string, cpuID string) struct { + fields map[string]interface{} + tags map[string]string +} { + return getPowerMetric(name, value, map[string]string{"package_id": packageID, "core_id": coreID, "cpu_id": cpuID}) +} + +func getPowerGlobalMetric(name string, value interface{}, socketID string) struct { + fields map[string]interface{} + tags map[string]string +} { + return getPowerMetric(name, value, map[string]string{"package_id": socketID}) +} + +func getPowerMetric(name string, value interface{}, tags map[string]string) struct { + fields map[string]interface{} + tags map[string]string +} { + return struct { + fields map[string]interface{} + tags map[string]string + }{ + map[string]interface{}{ + name: value, + }, + tags, + } +} + +func prepareCPUInfoForSingleCPU(power *PowerStat, cpuID string, coreID string, packageID string) { + power.cpuInfo = make(map[string]*cpuInfo) + power.cpuInfo[cpuID] = &cpuInfo{ + physicalID: packageID, + coreID: coreID, + cpuID: cpuID, + } +} + +func prepareCPUInfo(power *PowerStat, coreIDs []string, packageIDs []string) { + power.cpuInfo = make(map[string]*cpuInfo) + currentCPU := 0 + for _, packageID := range packageIDs { + for _, coreID := range coreIDs { + cpuID := strconv.Itoa(currentCPU) + power.cpuInfo[cpuID] = &cpuInfo{ + physicalID: packageID, + cpuID: cpuID, + coreID: coreID, + } + currentCPU++ + } + } +} + +func enableCoreMetrics(power *PowerStat) { + power.cpuC1StateResidency = true + 
power.cpuC6StateResidency = true + power.cpuTemperature = true + power.cpuBusyFrequency = true + power.cpuFrequency = true + power.cpuBusyCycles = true +} + +func disableCoreMetrics(power *PowerStat) { + power.cpuC1StateResidency = false + power.cpuC6StateResidency = false + power.cpuTemperature = false + power.cpuBusyFrequency = false + power.cpuFrequency = false + power.cpuBusyCycles = false +} + +func prepareRaplDataMap(socketIDs []string, socketCurrentEnergy float64, dramCurrentEnergy float64) map[string]*raplData { + raplDataMap := make(map[string]*raplData, len(socketIDs)) + for _, socketID := range socketIDs { + raplDataMap[socketID] = &raplData{ + socketCurrentEnergy: socketCurrentEnergy, + dramCurrentEnergy: dramCurrentEnergy, + } + } + + return raplDataMap +} + +func getPowerWithMockedServices() (*PowerStat, *mockFileService, *mockRaplService, *mockMsrService) { + fsMock := &mockFileService{} + msrMock := &mockMsrService{} + raplMock := &mockRaplService{} + logger := testutil.Logger{Name: "PowerPluginTest"} + p := newPowerStat(fsMock) + p.Log = logger + p.fs = fsMock + p.rapl = raplMock + p.msr = msrMock + + return p, fsMock, raplMock, msrMock +} diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go new file mode 100644 index 0000000000000..6c19b56eb7cc5 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr.go @@ -0,0 +1,208 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/influxdata/telegraf" +) + +const ( + systemCPUPath = "/sys/devices/system/cpu/" + cpuCurrentFreqPartialPath = "/sys/devices/system/cpu/cpu%s/cpufreq/scaling_cur_freq" + msrPartialPath = "/dev/cpu/%s/msr" + c3StateResidencyLocation = 0x3FC + c6StateResidencyLocation = 0x3FD + c7StateResidencyLocation = 0x3FE + maximumFrequencyClockCountLocation = 0xE7 + actualFrequencyClockCountLocation = 0xE8 + throttleTemperatureLocation = 0x1A2 + temperatureLocation = 0x19C + timestampCounterLocation = 0x10 +) + +// msrService is responsible for interactions with MSR. 
+type msrService interface { + getCPUCoresData() map[string]*msrData + retrieveCPUFrequencyForCore(core string) (float64, error) + openAndReadMsr(core string) error +} + +type msrServiceImpl struct { + cpuCoresData map[string]*msrData + msrOffsets []int64 + fs fileService + log telegraf.Logger +} + +func (m *msrServiceImpl) getCPUCoresData() map[string]*msrData { + return m.cpuCoresData +} + +func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, error) { + cpuFreqPath := fmt.Sprintf(cpuCurrentFreqPartialPath, core) + cpuFreqFile, err := os.Open(cpuFreqPath) + if err != nil { + return 0, fmt.Errorf("error opening scaling_cur_freq file on path %s, err: %v", cpuFreqPath, err) + } + defer cpuFreqFile.Close() + + cpuFreq, _, err := m.fs.readFileToFloat64(cpuFreqFile) + return convertKiloHertzToMegaHertz(cpuFreq), err +} + +func (m *msrServiceImpl) openAndReadMsr(core string) error { + path := fmt.Sprintf(msrPartialPath, core) + msrFile, err := os.Open(path) + if err != nil { + return fmt.Errorf("error opening MSR file on path %s, err: %v", path, err) + } + defer msrFile.Close() + + err = m.readDataFromMsr(core, msrFile) + if err != nil { + return fmt.Errorf("error reading data from MSR for core %s, err: %v", core, err) + } + return nil +} + +func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error { + g, ctx := errgroup.WithContext(context.Background()) + + // Create and populate a map that contains msr offsets along with their respective channels + msrOffsetsWithChannels := make(map[int64]chan uint64) + for _, offset := range m.msrOffsets { + msrOffsetsWithChannels[offset] = make(chan uint64) + } + + // Start a goroutine for each msr offset + for offset, channel := range msrOffsetsWithChannels { + // Wrap around function to avoid race on loop counter + func(off int64, ch chan uint64) { + g.Go(func() error { + defer close(ch) + + err := m.readValueFromFileAtOffset(ctx, ch, reader, off) + if err != nil { + return fmt.Errorf("error reading MSR file, err: %v", err) + } + + return nil + }) + }(offset, channel) + } + + newC3 := <-msrOffsetsWithChannels[c3StateResidencyLocation] + newC6 := <-msrOffsetsWithChannels[c6StateResidencyLocation] + newC7 := <-msrOffsetsWithChannels[c7StateResidencyLocation] + newMperf := <-msrOffsetsWithChannels[maximumFrequencyClockCountLocation] + newAperf := <-msrOffsetsWithChannels[actualFrequencyClockCountLocation] + newTsc := <-msrOffsetsWithChannels[timestampCounterLocation] + newThrottleTemp := <-msrOffsetsWithChannels[throttleTemperatureLocation] + newTemp := <-msrOffsetsWithChannels[temperatureLocation] + + if err := g.Wait(); err != nil { + return fmt.Errorf("received error during reading MSR values in goroutines: %v", err) + } + + m.cpuCoresData[core].c3Delta = newC3 - m.cpuCoresData[core].c3 + m.cpuCoresData[core].c6Delta = newC6 - m.cpuCoresData[core].c6 + m.cpuCoresData[core].c7Delta = newC7 - m.cpuCoresData[core].c7 + m.cpuCoresData[core].mperfDelta = newMperf - m.cpuCoresData[core].mperf + m.cpuCoresData[core].aperfDelta = newAperf - m.cpuCoresData[core].aperf + m.cpuCoresData[core].timeStampCounterDelta = newTsc - m.cpuCoresData[core].timeStampCounter + + m.cpuCoresData[core].c3 = newC3 + m.cpuCoresData[core].c6 = newC6 + m.cpuCoresData[core].c7 = newC7 + m.cpuCoresData[core].mperf = newMperf + m.cpuCoresData[core].aperf = newAperf + m.cpuCoresData[core].timeStampCounter = newTsc + // MSR (1A2h) IA32_TEMPERATURE_TARGET bits 23:16. 
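+	// For instance (hypothetical raw register value): if newThrottleTemp is
+	// 0x00640000, then (0x00640000 >> 16) & 0xFF = 0x64 = 100, i.e. a
+	// throttle temperature of 100 degrees Celsius.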
+ m.cpuCoresData[core].throttleTemp = (newThrottleTemp >> 16) & 0xFF + // MSR (19Ch) IA32_THERM_STATUS bits 22:16. + m.cpuCoresData[core].temp = (newTemp >> 16) & 0x7F + + return nil +} + +func (m *msrServiceImpl) readValueFromFileAtOffset(ctx context.Context, ch chan uint64, reader io.ReaderAt, offset int64) error { + value, err := m.fs.readFileAtOffsetToUint64(reader, offset) + if err != nil { + return err + } + + // Detect context cancellation and return an error if another goroutine fails + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- value: + } + + return nil +} + +// setCPUCores initializes the cpuCoresData map. +func (m *msrServiceImpl) setCPUCores() error { + m.cpuCoresData = make(map[string]*msrData) + cpuPrefix := "cpu" + cpuCore := fmt.Sprintf("%s%s", cpuPrefix, "[0-9]*") + cpuCorePattern := fmt.Sprintf("%s/%s", systemCPUPath, cpuCore) + cpuPaths, err := m.fs.getStringsMatchingPatternOnPath(cpuCorePattern) + if err != nil { + return err + } + if len(cpuPaths) == 0 { + m.log.Debugf("CPU core data wasn't found using pattern: %s", cpuCorePattern) + return nil + } + + for _, cpuPath := range cpuPaths { + core := strings.TrimPrefix(filepath.Base(cpuPath), cpuPrefix) + m.cpuCoresData[core] = &msrData{ + mperf: 0, + aperf: 0, + timeStampCounter: 0, + c3: 0, + c6: 0, + c7: 0, + throttleTemp: 0, + temp: 0, + mperfDelta: 0, + aperfDelta: 0, + timeStampCounterDelta: 0, + c3Delta: 0, + c6Delta: 0, + c7Delta: 0, + } + } + + return nil +} + +func newMsrServiceWithFs(logger telegraf.Logger, fs fileService) *msrServiceImpl { + msrService := &msrServiceImpl{ + fs: fs, + log: logger, + } + err := msrService.setCPUCores() + if err != nil { + // This error does not prevent the plugin from working, thus it is not returned. + msrService.log.Error(err) + } + + msrService.msrOffsets = []int64{c3StateResidencyLocation, c6StateResidencyLocation, c7StateResidencyLocation, + maximumFrequencyClockCountLocation, actualFrequencyClockCountLocation, timestampCounterLocation, + throttleTemperatureLocation, temperatureLocation} + + return msrService +} diff --git a/plugins/inputs/intel_powerstat/msr_mock_test.go b/plugins/inputs/intel_powerstat/msr_mock_test.go new file mode 100644 index 0000000000000..4ca80e8a871bf --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr_mock_test.go @@ -0,0 +1,61 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+ +package intel_powerstat + +import mock "github.com/stretchr/testify/mock" + +// mockMsrService is an autogenerated mock type for the msrService type +type mockMsrService struct { + mock.Mock +} + +// getCPUCoresData provides a mock function with given fields: +func (_m *mockMsrService) getCPUCoresData() map[string]*msrData { + ret := _m.Called() + + var r0 map[string]*msrData + if rf, ok := ret.Get(0).(func() map[string]*msrData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*msrData) + } + } + + return r0 +} + +// openAndReadMsr provides a mock function with given fields: core +func (_m *mockMsrService) openAndReadMsr(core string) error { + ret := _m.Called(core) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(core) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// retrieveCPUFrequencyForCore provides a mock function with given fields: core +func (_m *mockMsrService) retrieveCPUFrequencyForCore(core string) (float64, error) { + ret := _m.Called(core) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string) float64); ok { + r0 = rf(core) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(core) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/intel_powerstat/msr_test.go b/plugins/inputs/intel_powerstat/msr_test.go new file mode 100644 index 0000000000000..b03d2b00960a9 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr_test.go @@ -0,0 +1,135 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestReadDataFromMsrPositive(t *testing.T) { + firstValue := uint64(1000000) + secondValue := uint64(5000000) + delta := secondValue - firstValue + cpuCores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + prepareTestData(fsMock, cpuCores, msr, t) + cores := trimCPUFromCores(cpuCores) + + methodCallNumberForFirstValue := len(msr.msrOffsets) * len(cores) + methodCallNumberForSecondValue := methodCallNumberForFirstValue * 2 + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(firstValue, nil).Times(methodCallNumberForFirstValue) + for _, core := range cores { + require.NoError(t, msr.readDataFromMsr(core, nil)) + } + fsMock.AssertNumberOfCalls(t, "readFileAtOffsetToUint64", methodCallNumberForFirstValue) + verifyCPUCoresData(cores, t, msr, firstValue, false, 0) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). 
+ Return(secondValue, nil).Times(methodCallNumberForFirstValue) + for _, core := range cores { + require.NoError(t, msr.readDataFromMsr(core, nil)) + } + fsMock.AssertNumberOfCalls(t, "readFileAtOffsetToUint64", methodCallNumberForSecondValue) + verifyCPUCoresData(cores, t, msr, secondValue, true, delta) +} + +func trimCPUFromCores(cpuCores []string) []string { + cores := make([]string, 0) + for _, core := range cpuCores { + cores = append(cores, strings.TrimPrefix(core, "cpu")) + } + return cores +} + +func TestReadDataFromMsrNegative(t *testing.T) { + firstValue := uint64(1000000) + cpuCores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + + prepareTestData(fsMock, cpuCores, msr, t) + cores := trimCPUFromCores(cpuCores) + + methodCallNumberPerCore := len(msr.msrOffsets) + + // Normal execution for first core. + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(firstValue, nil).Times(methodCallNumberPerCore). + // Fail to read file for second core. + On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(uint64(0), errors.New("error reading file")).Times(methodCallNumberPerCore) + + require.NoError(t, msr.readDataFromMsr(cores[0], nil)) + require.Error(t, msr.readDataFromMsr(cores[1], nil)) +} + +func TestReadValueFromFileAtOffset(t *testing.T) { + cores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + ctx := context.Background() + testChannel := make(chan uint64, 1) + defer close(testChannel) + zero := uint64(0) + + prepareTestData(fsMock, cores, msr, t) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(zero, errors.New("error reading file")).Once() + require.Error(t, msr.readValueFromFileAtOffset(ctx, testChannel, nil, 0)) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(zero, nil).Once() + require.Equal(t, nil, msr.readValueFromFileAtOffset(ctx, testChannel, nil, 0)) + require.Equal(t, zero, <-testChannel) +} + +func prepareTestData(fsMock *mockFileService, cores []string, msr *msrServiceImpl, t *testing.T) { + // Prepare MSR offsets and CPUCoresData for test. + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). 
+ Return(cores, nil).Once() + require.NoError(t, msr.setCPUCores()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) +} + +func verifyCPUCoresData(cores []string, t *testing.T, msr *msrServiceImpl, expectedValue uint64, verifyDelta bool, delta uint64) { + for _, core := range cores { + require.Equal(t, expectedValue, msr.cpuCoresData[core].c3) + require.Equal(t, expectedValue, msr.cpuCoresData[core].c6) + require.Equal(t, expectedValue, msr.cpuCoresData[core].c7) + require.Equal(t, expectedValue, msr.cpuCoresData[core].mperf) + require.Equal(t, expectedValue, msr.cpuCoresData[core].aperf) + require.Equal(t, expectedValue, msr.cpuCoresData[core].timeStampCounter) + require.Equal(t, (expectedValue>>16)&0xFF, msr.cpuCoresData[core].throttleTemp) + require.Equal(t, (expectedValue>>16)&0x7F, msr.cpuCoresData[core].temp) + + if verifyDelta { + require.Equal(t, delta, msr.cpuCoresData[core].c3Delta) + require.Equal(t, delta, msr.cpuCoresData[core].c6Delta) + require.Equal(t, delta, msr.cpuCoresData[core].c7Delta) + require.Equal(t, delta, msr.cpuCoresData[core].mperfDelta) + require.Equal(t, delta, msr.cpuCoresData[core].aperfDelta) + require.Equal(t, delta, msr.cpuCoresData[core].timeStampCounterDelta) + } + } +} + +func getMsrServiceWithMockedFs() (*msrServiceImpl, *mockFileService) { + cores := []string{"cpu0", "cpu1", "cpu2", "cpu3"} + logger := testutil.Logger{Name: "PowerPluginTest"} + fsMock := &mockFileService{} + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once() + msr := newMsrServiceWithFs(logger, fsMock) + + return msr, fsMock +} diff --git a/plugins/inputs/intel_powerstat/rapl.go b/plugins/inputs/intel_powerstat/rapl.go new file mode 100644 index 0000000000000..32d60ac89c705 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl.go @@ -0,0 +1,238 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/influxdata/telegraf" +) + +const ( + intelRaplPath = "/sys/devices/virtual/powercap/intel-rapl" + intelRaplSocketPartialPath = "%s/intel-rapl:%s" + energyUjPartialPath = "%s/energy_uj" + maxEnergyRangeUjPartialPath = "%s/max_energy_range_uj" + maxPowerUwPartialPath = "%s/constraint_0_max_power_uw" + intelRaplDramPartialPath = "%s/intel-rapl:%s/%s" + intelRaplDramNamePartialPath = "%s/name" +) + +// raplService is responsible for interactions with RAPL. +type raplService interface { + initializeRaplData() + getRaplData() map[string]*raplData + retrieveAndCalculateData(socketID string) error + getConstraintMaxPowerWatts(socketID string) (float64, error) +} + +type raplServiceImpl struct { + log telegraf.Logger + data map[string]*raplData + dramFolders map[string]string + fs fileService +} + +// initializeRaplData looks for RAPL folders and initializes data map with fetched information. 
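+// Under /sys/devices/virtual/powercap/intel-rapl each socket is exposed as an +// intel-rapl:<socket> folder and each DRAM domain as an intel-rapl:<socket>:<domain> +// subfolder whose name file contains "dram".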
+func (r *raplServiceImpl) initializeRaplData() { + r.prepareData() + r.findDramFolders() +} + +func (r *raplServiceImpl) getRaplData() map[string]*raplData { + return r.data +} + +func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error { + socketRaplPath := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + socketEnergyUjPath := fmt.Sprintf(energyUjPartialPath, socketRaplPath) + socketEnergyUjFile, err := os.Open(socketEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening socket energy_uj file on path %s, err: %v", socketEnergyUjPath, err) + } + defer socketEnergyUjFile.Close() + + dramRaplPath := fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, socketID, r.dramFolders[socketID]) + dramEnergyUjPath := fmt.Sprintf(energyUjPartialPath, dramRaplPath) + dramEnergyUjFile, err := os.Open(dramEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening dram energy_uj file on path %s, err: %v", dramEnergyUjPath, err) + } + defer dramEnergyUjFile.Close() + + socketMaxEnergyUjPath := fmt.Sprintf(maxEnergyRangeUjPartialPath, socketRaplPath) + socketMaxEnergyUjFile, err := os.Open(socketMaxEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening socket max_energy_range_uj file on path %s, err: %v", socketMaxEnergyUjPath, err) + } + defer socketMaxEnergyUjFile.Close() + + dramMaxEnergyUjPath := fmt.Sprintf(maxEnergyRangeUjPartialPath, dramRaplPath) + dramMaxEnergyUjFile, err := os.Open(dramMaxEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening dram max_energy_range_uj file on path %s, err: %v", dramMaxEnergyUjPath, err) + } + defer dramMaxEnergyUjFile.Close() + + return r.calculateData(socketID, socketEnergyUjFile, dramEnergyUjFile, socketMaxEnergyUjFile, dramMaxEnergyUjFile) +} + +func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64, error) { + socketRaplPath := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + socketMaxPowerPath := fmt.Sprintf(maxPowerUwPartialPath, socketRaplPath) + socketMaxPowerFile, err := os.Open(socketMaxPowerPath) + if err != nil { + return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %s, err: %v", socketMaxPowerPath, err) + } + defer socketMaxPowerFile.Close() + + socketMaxPower, _, err := r.fs.readFileToFloat64(socketMaxPowerFile) + return convertMicroWattToWatt(socketMaxPower), err +} + +func (r *raplServiceImpl) prepareData() { + intelRaplPrefix := "intel-rapl:" + intelRapl := fmt.Sprintf("%s%s", intelRaplPrefix, "[0-9]*") + raplPattern := fmt.Sprintf("%s/%s", intelRaplPath, intelRapl) + + raplPaths, err := r.fs.getStringsMatchingPatternOnPath(raplPattern) + if err != nil { + r.log.Errorf("error while preparing RAPL data: %v", err) + r.data = make(map[string]*raplData) + return + } + if len(raplPaths) == 0 { + r.log.Debugf("RAPL data wasn't found using pattern: %s", raplPattern) + r.data = make(map[string]*raplData) + return + } + + // If RAPL exists initialize data map (if it wasn't initialized before). 
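+ // Entries that already exist are kept, so previously read energy values + // survive re-initialization; calculateData relies on them to compute deltas + // between gather cycles.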
+ if len(r.data) == 0 { + for _, raplPath := range raplPaths { + socketID := strings.TrimPrefix(filepath.Base(raplPath), intelRaplPrefix) + r.data[socketID] = &raplData{ + socketCurrentEnergy: 0, + dramCurrentEnergy: 0, + socketEnergy: 0, + dramEnergy: 0, + readDate: 0, + } + } + } +} + +func (r *raplServiceImpl) findDramFolders() { + intelRaplPrefix := "intel-rapl:" + intelRaplDram := fmt.Sprintf("%s%s", intelRaplPrefix, "[0-9]*[0-9]*") + // Clean existing map + r.dramFolders = make(map[string]string) + + for socketID := range r.data { + path := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + raplFoldersPattern := fmt.Sprintf("%s/%s", path, intelRaplDram) + pathsToRaplFolders, err := r.fs.getStringsMatchingPatternOnPath(raplFoldersPattern) + if err != nil { + r.log.Errorf("error during lookup for rapl dram: %v", err) + continue + } + if len(pathsToRaplFolders) == 0 { + r.log.Debugf("RAPL folders weren't found using pattern: %s", raplFoldersPattern) + continue + } + + raplFolders := make([]string, 0) + for _, folderPath := range pathsToRaplFolders { + raplFolders = append(raplFolders, filepath.Base(folderPath)) + } + + r.findDramFolder(raplFolders, socketID) + } +} + +func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string) { + for _, raplFolder := range raplFolders { + potentialDramPath := fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, socketID, raplFolder) + nameFilePath := fmt.Sprintf(intelRaplDramNamePartialPath, potentialDramPath) + read, err := r.fs.readFile(nameFilePath) + if err != nil { + r.log.Errorf("error reading file on path: %s, err: %v", nameFilePath, err) + continue + } + + // Remove new line character + trimmedString := strings.TrimRight(string(read), "\n") + if trimmedString == "dram" { + // There should be only one DRAM folder per socket + r.dramFolders[socketID] = raplFolder + return + } + } +} + +func (r *raplServiceImpl) calculateData(socketID string, socketEnergyUjFile io.Reader, dramEnergyUjFile io.Reader, + socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader, +) error { + newSocketEnergy, _, err := r.readEnergyInJoules(socketEnergyUjFile) + if err != nil { + return err + } + + newDramEnergy, readDate, err := r.readEnergyInJoules(dramEnergyUjFile) + if err != nil { + return err + } + + interval := convertNanoSecondsToSeconds(readDate - r.data[socketID].readDate) + r.data[socketID].readDate = readDate + if interval == 0 { + return fmt.Errorf("interval between last two Telegraf cycles is 0") + } + + if newSocketEnergy > r.data[socketID].socketEnergy { + r.data[socketID].socketCurrentEnergy = (newSocketEnergy - r.data[socketID].socketEnergy) / interval + } else { + socketMaxEnergy, _, err := r.readEnergyInJoules(socketMaxEnergyUjFile) + if err != nil { + return err + } + // When socket energy_uj counter reaches maximum value defined in max_energy_range_uj file it + // starts counting from 0. + r.data[socketID].socketCurrentEnergy = (socketMaxEnergy - r.data[socketID].socketEnergy + newSocketEnergy) / interval + } + + if newDramEnergy > r.data[socketID].dramEnergy { + r.data[socketID].dramCurrentEnergy = (newDramEnergy - r.data[socketID].dramEnergy) / interval + } else { + dramMaxEnergy, _, err := r.readEnergyInJoules(dramMaxEnergyUjFile) + if err != nil { + return err + } + // When dram energy_uj counter reaches maximum value defined in max_energy_range_uj file it + // starts counting from 0. 
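+ // For example, with max_energy_range_uj of 100 J, a previous reading of + // 90 J and a new reading of 10 J, the energy used is (100 - 90) + 10 = 20 J + // over the interval.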
+ r.data[socketID].dramCurrentEnergy = (dramMaxEnergy - r.data[socketID].dramEnergy + newDramEnergy) / interval + } + r.data[socketID].socketEnergy = newSocketEnergy + r.data[socketID].dramEnergy = newDramEnergy + + return nil +} + +func (r *raplServiceImpl) readEnergyInJoules(reader io.Reader) (float64, int64, error) { + currentEnergy, readDate, err := r.fs.readFileToFloat64(reader) + return convertMicroJoulesToJoules(currentEnergy), readDate, err +} + +func newRaplServiceWithFs(logger telegraf.Logger, fs fileService) *raplServiceImpl { + return &raplServiceImpl{ + log: logger, + data: make(map[string]*raplData), + dramFolders: make(map[string]string), + fs: fs, + } +} diff --git a/plugins/inputs/intel_powerstat/rapl_mock_test.go b/plugins/inputs/intel_powerstat/rapl_mock_test.go new file mode 100644 index 0000000000000..7742db140ccf1 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl_mock_test.go @@ -0,0 +1,66 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. + +package intel_powerstat + +import mock "github.com/stretchr/testify/mock" + +// mockRaplService is an autogenerated mock type for the raplService type +type mockRaplService struct { + mock.Mock +} + +// getConstraintMaxPowerWatts provides a mock function with given fields: socketID +func (_m *mockRaplService) getConstraintMaxPowerWatts(socketID string) (float64, error) { + ret := _m.Called(socketID) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string) float64); ok { + r0 = rf(socketID) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(socketID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getRaplData provides a mock function with given fields: +func (_m *mockRaplService) getRaplData() map[string]*raplData { + ret := _m.Called() + + var r0 map[string]*raplData + if rf, ok := ret.Get(0).(func() map[string]*raplData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*raplData) + } + } + + return r0 +} + +// initializeRaplData provides a mock function with given fields: +func (_m *mockRaplService) initializeRaplData() { + _m.Called() +} + +// retrieveAndCalculateData provides a mock function with given fields: socketID +func (_m *mockRaplService) retrieveAndCalculateData(socketID string) error { + ret := _m.Called(socketID) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(socketID) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/plugins/inputs/intel_powerstat/rapl_test.go b/plugins/inputs/intel_powerstat/rapl_test.go new file mode 100644 index 0000000000000..5333ec13aaa79 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl_test.go @@ -0,0 +1,116 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPrepareData(t *testing.T) { + sockets := []string{"intel-rapl:0", "intel-rapl:1"} + rapl, fsMock := getRaplWithMockedFs() + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything).Return(sockets, nil).Twice() + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(sockets), len(rapl.getRaplData())) + + // Verify no data is wiped in the next calls + socketEnergy := 74563813417.0 + socketID := "0" + rapl.data[socketID].socketEnergy = socketEnergy + + rapl.prepareData() + 
fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(sockets), len(rapl.getRaplData())) + require.Equal(t, socketEnergy, rapl.data[socketID].socketEnergy) + + // Verify data is wiped once there is no RAPL folders + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(nil, errors.New("missing RAPL")).Once() + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, 0, len(rapl.getRaplData())) +} + +func TestFindDramFolders(t *testing.T) { + sockets := []string{"0", "1"} + raplFolders := []string{"intel-rapl:0:1", "intel-rapl:0:2", "intel-rapl:0:3"} + rapl, fsMock := getRaplWithMockedFs() + + for _, socketID := range sockets { + rapl.data[socketID] = &raplData{} + } + + firstPath := fmt.Sprintf(intelRaplDramNamePartialPath, + fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, "0", raplFolders[2])) + secondPath := fmt.Sprintf(intelRaplDramNamePartialPath, + fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, "1", raplFolders[1])) + + fsMock. + On("getStringsMatchingPatternOnPath", mock.Anything).Return(raplFolders, nil).Twice(). + On("readFile", firstPath).Return([]byte("dram"), nil).Once(). + On("readFile", secondPath).Return([]byte("dram"), nil).Once(). + On("readFile", mock.Anything).Return([]byte("random"), nil) + + rapl.findDramFolders() + + require.Equal(t, len(sockets), len(rapl.dramFolders)) + require.Equal(t, raplFolders[2], rapl.dramFolders["0"]) + require.Equal(t, raplFolders[1], rapl.dramFolders["1"]) + fsMock.AssertNumberOfCalls(t, "readFile", 5) +} + +func TestCalculateDataOverflowCases(t *testing.T) { + socketID := "1" + rapl, fsMock := getRaplWithMockedFs() + + rapl.data[socketID] = &raplData{} + rapl.data[socketID].socketEnergy = convertMicroJoulesToJoules(23424123.1) + rapl.data[socketID].dramEnergy = convertMicroJoulesToJoules(345611233.2) + rapl.data[socketID].readDate = 54123 + + interval := int64(54343) + convertedInterval := convertNanoSecondsToSeconds(interval - rapl.data[socketID].readDate) + + newEnergy := 3343443.4 + maxEnergy := 234324546456.6 + convertedNewEnergy := convertMicroJoulesToJoules(newEnergy) + convertedMaxNewEnergy := convertMicroJoulesToJoules(maxEnergy) + + maxDramEnergy := 981230834098.3 + newDramEnergy := 4533311.1 + convertedMaxDramEnergy := convertMicroJoulesToJoules(maxDramEnergy) + convertedDramEnergy := convertMicroJoulesToJoules(newDramEnergy) + + expectedCurrentEnergy := (convertedMaxNewEnergy - rapl.data[socketID].socketEnergy + convertedNewEnergy) / convertedInterval + expectedDramCurrentEnergy := (convertedMaxDramEnergy - rapl.data[socketID].dramEnergy + convertedDramEnergy) / convertedInterval + + fsMock. + On("readFileToFloat64", mock.Anything).Return(newEnergy, int64(12321), nil).Once(). + On("readFileToFloat64", mock.Anything).Return(newDramEnergy, interval, nil).Once(). + On("readFileToFloat64", mock.Anything).Return(maxEnergy, int64(64534), nil).Once(). 
+ On("readFileToFloat64", mock.Anything).Return(maxDramEnergy, int64(98342), nil).Once() + + require.NoError(t, rapl.calculateData(socketID, strings.NewReader(mock.Anything), strings.NewReader(mock.Anything), + strings.NewReader(mock.Anything), strings.NewReader(mock.Anything))) + + require.Equal(t, expectedCurrentEnergy, rapl.data[socketID].socketCurrentEnergy) + require.Equal(t, expectedDramCurrentEnergy, rapl.data[socketID].dramCurrentEnergy) +} + +func getRaplWithMockedFs() (*raplServiceImpl, *mockFileService) { + logger := testutil.Logger{Name: "PowerPluginTest"} + fsMock := &mockFileService{} + rapl := newRaplServiceWithFs(logger, fsMock) + + return rapl, fsMock +} diff --git a/plugins/inputs/intel_powerstat/unit_converter.go b/plugins/inputs/intel_powerstat/unit_converter.go new file mode 100644 index 0000000000000..7dd8c0d0d1aa0 --- /dev/null +++ b/plugins/inputs/intel_powerstat/unit_converter.go @@ -0,0 +1,50 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "math" + "strconv" +) + +const ( + microJouleToJoule = 1.0 / 1000000 + microWattToWatt = 1.0 / 1000000 + kiloHertzToMegaHertz = 1.0 / 1000 + nanoSecondsToSeconds = 1.0 / 1000000000 + cyclesToHertz = 1.0 / 1000000 +) + +func convertMicroJoulesToJoules(mJ float64) float64 { + return mJ * microJouleToJoule +} + +func convertMicroWattToWatt(mW float64) float64 { + return mW * microWattToWatt +} + +func convertKiloHertzToMegaHertz(kiloHertz float64) float64 { + return kiloHertz * kiloHertzToMegaHertz +} + +func convertNanoSecondsToSeconds(ns int64) float64 { + return float64(ns) * nanoSecondsToSeconds +} + +func convertProcessorCyclesToHertz(pc uint64) float64 { + return float64(pc) * cyclesToHertz +} + +func roundFloatToNearestTwoDecimalPlaces(n float64) float64 { + return math.Round(n*100) / 100 +} + +func convertIntegerArrayToStringArray(array []int64) []string { + stringArray := make([]string, 0) + for _, value := range array { + stringArray = append(stringArray, strconv.FormatInt(value, 10)) + } + + return stringArray +} diff --git a/plugins/inputs/intel_rdt/README.md b/plugins/inputs/intel_rdt/README.md index 1a6e55f6a7fb9..cc98c13b6c0e0 100644 --- a/plugins/inputs/intel_rdt/README.md +++ b/plugins/inputs/intel_rdt/README.md @@ -1,22 +1,52 @@ # Intel RDT Input Plugin -The intel_rdt plugin collects information provided by monitoring features of -Intel Resource Director Technology (Intel(R) RDT) like Cache Monitoring Technology (CMT), -Memory Bandwidth Monitoring (MBM), Cache Allocation Technology (CAT) and Code -and Data Prioritization (CDP) Technology provide the hardware framework to monitor -and control the utilization of shared resources, like last level cache, memory bandwidth. -These Technologies comprise Intel’s Resource Director Technology (RDT). -As multithreaded and multicore platform architectures emerge, -running workloads in single-threaded, multithreaded, or complex virtual machine environment, -the last level cache and memory bandwidth are key resources to manage. Intel introduces CMT, -MBM, CAT and CDP to manage these various workloads across shared resources. - -To gather Intel RDT metrics plugin uses _pqos_ cli tool which is a part of [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). +The `intel_rdt` plugin collects information provided by monitoring features of +the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor +and control the utilization of shared resources (ex: last level cache, memory bandwidth). 
+ +### About Intel RDT +Intel’s Resource Director Technology (RDT) framework consists of: +- Cache Monitoring Technology (CMT) +- Memory Bandwidth Monitoring (MBM) +- Cache Allocation Technology (CAT) +- Code and Data Prioritization (CDP) + +As multithreaded and multicore platform architectures emerge, the last level cache and +memory bandwidth are key resources to manage for running workloads in single-threaded, +multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT +and CDP to manage these workloads across shared resources. + +### Prerequisites - PQoS Tool +To gather Intel RDT metrics, the `intel_rdt` plugin uses the _pqos_ CLI tool, which is +part of the [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). Before using this plugin please be sure _pqos_ is properly installed and configured regarding that the plugin run _pqos_ to work with `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and above. -Be aware pqos tool needs root privileges to work properly. +Note: the pqos tool needs root privileges to work properly. Metrics will be constantly reported from the following `pqos` commands within the given interval: +#### If telegraf does not run as the root user + +The `pqos` command requires root-level access to run. There are two options to +overcome this if you run telegraf as a non-root user. + +It is possible to update the pqos binary with setuid using `chmod u+s +/path/to/pqos`. This approach is simple and requires no modification to the +Telegraf configuration; however, pqos is not a read-only tool and there are +security implications for making such a command setuid root. + +Alternatively, you may enable sudo to allow `pqos` to run correctly, as follows: + +Add the following to your sudoers file (assumes telegraf runs as a user named `telegraf`): + +``` +telegraf ALL=(ALL) NOPASSWD:/usr/sbin/pqos -r --iface-os --mon-file-type=csv --mon-interval=* +``` + +If you wish to use sudo, you must also add `use_sudo = true` to the Telegraf +configuration (see below). + #### In case of cores monitoring: ``` pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-core=all:[CORES]\;mbt:[CORES] @@ -46,29 +76,33 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t ### Configuration ```toml # Read Intel RDT metrics -[[inputs.IntelRDT]] - ## Optionally set sampling interval to Nx100ms. - ## This value is propagated to pqos tool. Interval format is defined by pqos itself. - ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. - # sampling_interval = "10" +[[inputs.intel_rdt]] + ## Optionally set sampling interval to Nx100ms. + ## This value is propagated to pqos tool. Interval format is defined by pqos itself. + ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. + # sampling_interval = "10" - ## Optionally specify the path to pqos executable. - ## If not provided, auto discovery will be performed. - # pqos_path = "/usr/local/bin/pqos" + ## Optionally specify the path to pqos executable. + ## If not provided, auto discovery will be performed. + # pqos_path = "/usr/local/bin/pqos" - ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. - ## If not provided, default value is false. - # shortened_metrics = false - - ## Specify the list of groups of CPU core(s) to be provided as pqos input.
- ## Mandatory if processes aren't set and forbidden if processes are specified. - ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] - # cores = ["0-3"] - - ## Specify the list of processes for which Metrics will be collected. - ## Mandatory if cores aren't set and forbidden if cores are specified. - ## e.g. ["qemu", "pmd"] - # processes = ["process"] + ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. + ## If not provided, default value is false. + # shortened_metrics = false + + ## Specify the list of groups of CPU core(s) to be provided as pqos input. + ## Mandatory if processes aren't set and forbidden if processes are specified. + ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] + # cores = ["0-3"] + + ## Specify the list of processes for which Metrics will be collected. + ## Mandatory if cores aren't set and forbidden if cores are specified. + ## e.g. ["qemu", "pmd"] + # processes = ["process"] + + ## Specify if the pqos process should be called with sudo. + ## Mandatory if the telegraf process does not run as root. + # use_sudo = false ``` ### Exposed metrics @@ -78,20 +112,20 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t | MBR | Memory Bandwidth on Remote NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the remote NUMA memory channel | | MBT | Total Memory Bandwidth | Total memory bandwidth utilized by a CPU core/process on local and remote NUMA memory channels | | LLC | L3 Cache Occupancy | Total Last Level Cache occupancy by a CPU core/process | -| *LLC_Misses | L3 Cache Misses | Total Last Level Cache misses by a CPU core/process | -| *IPC | Instructions Per Cycle | Total instructions per cycle executed by a CPU core/process | +| LLC_Misses* | L3 Cache Misses | Total Last Level Cache misses by a CPU core/process | +| IPC* | Instructions Per Cycle | Total instructions per cycle executed by a CPU core/process | *optional ### Troubleshooting -Pointing to non-existing core will lead to throwing an error by _pqos_ and plugin will not work properly. -Be sure to check if provided core number exists within desired system. +Pointing to non-existent cores will cause _pqos_ to return an error, and the plugin will not work properly. +Be sure the provided core numbers exist on the desired system. -Be aware reading Intel RDT metrics by _pqos_ cannot be done simultaneously on the same resource. -So be sure to not use any other _pqos_ instance which is monitoring the same cores or PIDs within working system. -Also there is no possibility to monitor same cores or PIDs on different groups. +Be aware that reading Intel RDT metrics via _pqos_ cannot be done simultaneously on the same resource. +Do not use any other _pqos_ instance that is monitoring the same cores or PIDs within the working system. +It is not possible to monitor the same cores or PIDs in different groups. -Pids association for the given process could be manually checked by `pidof` command. E.g: +PIDs associated with a given process can be checked manually with the `pidof` command. E.g: ``` pidof PROCESS ``` @@ -105,4 +139,4 @@ where `PROCESS` is process name.
> rdt_metric,cores=12\,19,host=r2-compute-20,name=MBL,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=MBR,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=MBT,process=top value=0 1598962030000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index e61266c0a4f6b..d354bb855aacf 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt @@ -13,6 +14,7 @@ import ( "strconv" "strings" "sync" + "syscall" "time" "github.com/google/go-cmp/cmp" @@ -45,6 +47,7 @@ type IntelRDT struct { Processes []string `toml:"processes"` SamplingInterval int32 `toml:"sampling_interval"` ShortenedMetrics bool `toml:"shortened_metrics"` + UseSudo bool `toml:"use_sudo"` Log telegraf.Logger `toml:"-"` Publisher Publisher `toml:"-"` @@ -63,6 +66,12 @@ type processMeasurement struct { measurement string } +type splitCSVLine struct { + timeValue string + metricsValues []string + coreOrPIDsValues []string +} + // All gathering is done in the Start function func (r *IntelRDT) Gather(_ telegraf.Accumulator) error { return nil @@ -96,6 +105,10 @@ func (r *IntelRDT) SampleConfig() string { ## Mandatory if cores aren't set and forbidden if cores are specified. ## e.g. ["qemu", "pmd"] # processes = ["process"] + + ## Specify if the pqos process should be called with sudo. + ## Mandatory if the telegraf process does not run as root. + # use_sudo = false ` } @@ -223,8 +236,8 @@ func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[s } for _, availableProcess := range availableProcesses { if choice.Contains(availableProcess.Name, providedProcesses) { - PID := availableProcess.PID - mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", PID) + "," + pid := availableProcess.PID + mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", pid) + "," } } for key := range mapProcessPIDs { @@ -239,21 +252,25 @@ func (r *IntelRDT) createArgsAndStartPQOS(ctx context.Context) { if len(r.parsedCores) != 0 { coresArg := createArgCores(r.parsedCores) args = append(args, coresArg) - go r.readData(args, nil, ctx) - + go r.readData(ctx, args, nil) } else if len(r.processesPIDsMap) != 0 { processArg := createArgProcess(r.processesPIDsMap) args = append(args, processArg) - go r.readData(args, r.processesPIDsMap, ctx) + go r.readData(ctx, args, r.processesPIDsMap) } - return } -func (r *IntelRDT) readData(args []string, processesPIDsAssociation map[string]string, ctx context.Context) { +func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAssociation map[string]string) { r.wg.Add(1) defer r.wg.Done() - cmd := exec.Command(r.PqosPath, append(args)...) + cmd := exec.Command(r.PqosPath, args...) + + if r.UseSudo { + // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` + args = []string{"-c", fmt.Sprintf("sudo %s %s", r.PqosPath, strings.Replace(strings.Join(args, " "), ";", "\\;", -1))} + cmd = exec.Command("/bin/sh", args...) 
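+ // Note: the pqos group arguments contain semicolons (e.g. all:[0,1];mbt:[0,1]), + // which are escaped above so that /bin/sh does not treat them as command + // separators.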
+ } cmdReader, err := cmd.StdoutPipe() if err != nil { @@ -279,12 +296,12 @@ }() err = cmd.Start() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) return } err = cmd.Wait() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) } } @@ -299,11 +316,9 @@ */ toOmit := pqosInitOutputLinesNumber - // omit first measurements which are zeroes - if len(r.parsedCores) != 0 { + if len(r.parsedCores) != 0 { // omit first measurements which are zeroes toOmit = toOmit + len(r.parsedCores) - // specify how many lines should pass before stopping - } else if len(processesPIDsAssociation) != 0 { + } else if len(processesPIDsAssociation) != 0 { // specify how many lines should pass before stopping toOmit = toOmit + len(processesPIDsAssociation) } for omitCounter := 0; omitCounter < toOmit; omitCounter++ { @@ -318,13 +333,13 @@ if len(r.Processes) != 0 { newMetric := processMeasurement{} - PIDs, err := findPIDsInMeasurement(out) + pids, err := findPIDsInMeasurement(out) if err != nil { r.errorChan <- err break } for processName, PIDsProcess := range processesPIDsAssociation { - if PIDs == PIDsProcess { + if pids == PIDsProcess { newMetric.name = processName newMetric.measurement = out } @@ -337,14 +352,30 @@ } func shutDownPqos(pqos *exec.Cmd) error { + timeout := time.Second * 2 + if pqos.Process != nil { - err := pqos.Process.Signal(os.Interrupt) - if err != nil { - err = pqos.Process.Kill() - if err != nil { - return fmt.Errorf("failed to shut down pqos: %v", err) + // try to send interrupt signal, ignore err for now + _ = pqos.Process.Signal(os.Interrupt) + + // wait and constantly check if pqos is still running + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + if err := pqos.Process.Signal(syscall.Signal(0)); err == os.ErrProcessDone { + return nil + } else if ctx.Err() != nil { + break } } + + // if pqos is still running after some period, try to kill it + // this will send SIGKILL to pqos, and leave garbage in `/sys/fs/resctrl/mon_groups` + // fixed in https://github.com/intel/intel-cmt-cat/issues/197 + err := pqos.Process.Kill() + if err != nil { + return fmt.Errorf("failed to shut down pqos: %v", err) + } } return nil } @@ -457,29 +488,29 @@ func validateAndParseCores(coreStr string) ([]int, error) { func findPIDsInMeasurement(measurements string) (string, error) { // to distinguish PIDs from Cores (PIDs should be in quotes) var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`) - PIDsMatch := insideQuoteRegex.FindStringSubmatch(measurements) - if len(PIDsMatch) < 2 { + pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements) + if len(pidsMatch) < 2 { return "", fmt.Errorf("cannot find PIDs in measurement line") } - PIDs := PIDsMatch[1] - return PIDs, nil + pids := pidsMatch[1] + return pids, nil } -func splitCSVLineIntoValues(line string) (timeValue string, metricsValues, coreOrPIDsValues []string, err error) { +func splitCSVLineIntoValues(line string) (splitCSVLine, error) { values, err := splitMeasurementLine(line) if err != nil { - return "", nil, nil, err + return splitCSVLine{}, err } - timeValue = values[0] + timeValue := values[0] // Because 
pqos csv format is broken when many cores are involved in PID or // group of PIDs, there is need to work around it. E.g.: // Time,PID,Core,IPC,LLC Misses,LLC[KB],MBL[MB/s],MBR[MB/s],MBT[MB/s] // 2020-08-12 13:34:36,"45417,29170,",37,44,0.00,0,0.0,0.0,0.0,0.0 - metricsValues = values[len(values)-numberOfMetrics:] - coreOrPIDsValues = values[1 : len(values)-numberOfMetrics] + metricsValues := values[len(values)-numberOfMetrics:] + coreOrPIDsValues := values[1 : len(values)-numberOfMetrics] - return timeValue, metricsValues, coreOrPIDsValues, nil + return splitCSVLine{timeValue, metricsValues, coreOrPIDsValues}, nil } func validateInterval(interval int32) error { @@ -498,7 +529,7 @@ func splitMeasurementLine(line string) ([]string, error) { } func parseTime(value string) (time.Time, error) { - timestamp, err := time.Parse(timestampFormat, value) + timestamp, err := time.ParseInLocation(timestampFormat, value, time.Local) if err != nil { return time.Time{}, err } @@ -541,7 +572,7 @@ func makeRange(min, max int) []int { } func init() { - inputs.Add("IntelRDT", func() telegraf.Input { + inputs.Add("intel_rdt", func() telegraf.Input { rdt := IntelRDT{} pathPqos, _ := exec.LookPath("pqos") if len(pathPqos) > 0 { diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 7e876425724ec..18dd2e93aa1c1 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt @@ -51,18 +52,18 @@ func TestSplitCSVLineIntoValues(t *testing.T) { expectedMetricsValue := []string{"0.00", "0", "0.0", "0.0", "0.0", "0.0"} expectedCoreOrPidsValue := []string{"\"45417", "29170\"", "37", "44"} - timeValue, metricsValue, coreOrPidsValue, err := splitCSVLineIntoValues(line) + splitCSV, err := splitCSVLineIntoValues(line) assert.Nil(t, err) - assert.Equal(t, expectedTimeValue, timeValue) - assert.Equal(t, expectedMetricsValue, metricsValue) - assert.Equal(t, expectedCoreOrPidsValue, coreOrPidsValue) + assert.Equal(t, expectedTimeValue, splitCSV.timeValue) + assert.Equal(t, expectedMetricsValue, splitCSV.metricsValues) + assert.Equal(t, expectedCoreOrPidsValue, splitCSV.coreOrPIDsValues) wrongLine := "2020-08-12 13:34:36,37,44,0.00,0,0.0" - timeValue, metricsValue, coreOrPidsValue, err = splitCSVLineIntoValues(wrongLine) + splitCSV, err = splitCSVLineIntoValues(wrongLine) assert.NotNil(t, err) - assert.Equal(t, "", timeValue) - assert.Nil(t, nil, metricsValue) - assert.Nil(t, nil, coreOrPidsValue) + assert.Equal(t, "", splitCSV.timeValue) + assert.Nil(t, nil, splitCSV.metricsValues) + assert.Nil(t, nil, splitCSV.coreOrPIDsValues) } func TestFindPIDsInMeasurement(t *testing.T) { @@ -106,7 +107,6 @@ func TestCreateArgsCores(t *testing.T) { assert.EqualValues(t, expected, result) cores = []string{"1,2,3", "4,5,6"} - expected = "--mon-core=" expectedPrefix := "--mon-core=" expectedSubstring := "all:[1,2,3];mbt:[1,2,3];" expectedSubstring2 := "all:[4,5,6];mbt:[4,5,6];" diff --git a/plugins/inputs/intel_rdt/intel_rdt_windows.go b/plugins/inputs/intel_rdt/intel_rdt_windows.go index e3ab0978fb374..64f9ebbe94b68 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_windows.go +++ b/plugins/inputs/intel_rdt/intel_rdt_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/processes.go b/plugins/inputs/intel_rdt/processes.go index ff86a4e6b745c..dd172b6d92dd2 100644 --- a/plugins/inputs/intel_rdt/processes.go +++ 
b/plugins/inputs/intel_rdt/processes.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index 5ca9890472b27..4fdb91dc7b128 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -1,15 +1,30 @@ +//go:build !windows // +build !windows package intel_rdt import ( "context" + "errors" "strings" "time" "github.com/influxdata/telegraf" ) +type parsedCoresMeasurement struct { + cores string + values []float64 + time time.Time +} + +type parsedProcessMeasurement struct { + process string + cores string + values []float64 + time time.Time +} + // Publisher for publish new RDT metrics to telegraf accumulator type Publisher struct { acc telegraf.Accumulator @@ -18,7 +33,6 @@ type Publisher struct { BufferChanProcess chan processMeasurement BufferChanCores chan string errChan chan error - stopChan chan bool } func NewPublisher(acc telegraf.Accumulator, log telegraf.Logger, shortenedMetrics bool) Publisher { @@ -50,50 +64,48 @@ func (p *Publisher) publish(ctx context.Context) { } func (p *Publisher) publishCores(measurement string) { - coresString, values, timestamp, err := parseCoresMeasurement(measurement) + parsedCoresMeasurement, err := parseCoresMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorCores(coresString, values, timestamp) - return + p.addToAccumulatorCores(parsedCoresMeasurement) } func (p *Publisher) publishProcess(measurement processMeasurement) { - process, coresString, values, timestamp, err := parseProcessesMeasurement(measurement) + parsedProcessMeasurement, err := parseProcessesMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorProcesses(process, coresString, values, timestamp) - return + p.addToAccumulatorProcesses(parsedProcessMeasurement) } -func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) { +func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error) { var values []float64 - timeValue, metricsValues, cores, err := splitCSVLineIntoValues(measurements) + splitCSV, err := splitCSVLineIntoValues(measurements) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } // change string slice to one string and separate it by coma - coresString := strings.Join(cores, ",") + coresString := strings.Join(splitCSV.coreOrPIDsValues, ",") // trim unwanted quotes coresString = strings.Trim(coresString, "\"") - for _, metric := range metricsValues { + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } values = append(values, parsedValue) } - return coresString, values, timestamp, nil + return parsedCoresMeasurement{coresString, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -104,41 +116,47 @@ func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, tags := 
map[string]string{} fields := make(map[string]interface{}) - tags["cores"] = cores + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } } -func parseProcessesMeasurement(measurement processMeasurement) (string, string, []float64, time.Time, error) { - var values []float64 - timeValue, metricsValues, coreOrPidsValues, pids, err := parseProcessMeasurement(measurement.measurement) +func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMeasurement, error) { + splitCSV, err := splitCSVLineIntoValues(measurement.measurement) + if err != nil { + return parsedProcessMeasurement{}, err + } + pids, err := findPIDsInMeasurement(measurement.measurement) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err + } + lenOfPIDs := len(strings.Split(pids, ",")) + if lenOfPIDs > len(splitCSV.coreOrPIDsValues) { + return parsedProcessMeasurement{}, errors.New("detected more pids (quoted) than actual number of pids in csv line") } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } actualProcess := measurement.name - lenOfPids := len(strings.Split(pids, ",")) - cores := coreOrPidsValues[lenOfPids:] - coresString := strings.Trim(strings.Join(cores, ","), `"`) + cores := strings.Trim(strings.Join(splitCSV.coreOrPIDsValues[lenOfPIDs:], ","), `"`) - for _, metric := range metricsValues { + var values []float64 + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } values = append(values, parsedValue) } - return actualProcess, coresString, values, timestamp, nil + return parsedProcessMeasurement{actualProcess, cores, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -149,23 +167,11 @@ func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metr tags := map[string]string{} fields := make(map[string]interface{}) - tags["process"] = process - tags["cores"] = cores + tags["process"] = measurement.process + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) - } -} - -func parseProcessMeasurement(measurements string) (string, []string, []string, string, error) { - timeValue, metricsValues, coreOrPidsValues, err := splitCSVLineIntoValues(measurements) - if err != nil { - return "", nil, nil, "", err - } - pids, err := findPIDsInMeasurement(measurements) - if err != nil { - return "", nil, nil, "", err + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } - return timeValue, metricsValues, coreOrPidsValues, pids, nil } diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 5248ede7a16db..2529a2235a1b9 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -1,3 +1,4 @@ +//go:build 
!windows // +build !windows package intel_rdt @@ -36,29 +37,29 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.Nil(t, err) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) t.Run("not valid measurement string", func(t *testing.T) { measurement := "not, valid, measurement" - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid values string", func(t *testing.T) { measurement := fmt.Sprintf("%s,%s,%s,%s,%f,%f,%f,%f", @@ -71,12 +72,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid timestamp format", func(t *testing.T) { invalidTimestamp := "2020-08-12-21 13:34:" @@ -90,12 +91,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) } @@ -118,44 +119,36 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) newMeasurement := processMeasurement{ name: processName, measurement: measurement, } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := 
parseProcessesMeasurement(newMeasurement) + result, err := parseProcessesMeasurement(newMeasurement) assert.Nil(t, err) - assert.Equal(t, processName, actualProcess) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, processName, result.process) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) - t.Run("not valid measurement string", func(t *testing.T) { - processName := "process_name" - measurement := "invalid,measurement,format" - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid timestamp format", func(t *testing.T) { - invalidTimestamp := "2020-20-20-31" - measurement := fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", + invalidTimestamp := "2020-20-20-31" + negativeTests := []struct { + name string + measurement string + }{{ + name: "not valid measurement string", + measurement: "invalid,measurement,format", + }, { + name: "not valid timestamp format", + measurement: fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", invalidTimestamp, pids, cores, @@ -164,44 +157,42 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["LLC"], metricsValues["MBL"], metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid values string", func(t *testing.T) { - measurement := fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", - timestamp, - pids, - cores, - "1##", - "da", - metricsValues["LLC"], - metricsValues["MBL"], - metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + metricsValues["MBT"]), + }, + { + name: "not valid values string", + measurement: fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", + timestamp, + pids, + cores, + "1##", + "da", + metricsValues["LLC"], + metricsValues["MBL"], + metricsValues["MBR"], + metricsValues["MBT"]), + }, + { + name: "not valid csv line with quotes", + measurement: "0000-08-02 
0:00:00,,\",,,,,,,,,,,,,,,,,,,,,,,,\",,", + }, + } - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) + for _, test := range negativeTests { + t.Run(test.name, func(t *testing.T) { + newMeasurement := processMeasurement{ + name: processName, + measurement: test.measurement, + } + result, err := parseProcessesMeasurement(newMeasurement) + + assert.NotNil(t, err) + assert.Equal(t, "", result.process) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) + }) + } } func TestAddToAccumulatorCores(t *testing.T) { @@ -211,9 +202,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetrics { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -225,9 +216,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetricsShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -243,9 +234,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcesses { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -258,9 +249,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcessesShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go index 4cdba9099edf0..0b89a974a0a74 100644 --- a/plugins/inputs/internal/internal_test.go +++ b/plugins/inputs/internal/internal_test.go @@ -6,21 +6,21 @@ import ( "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSelfPlugin(t *testing.T) { s := NewSelf() acc := &testutil.Accumulator{} - s.Gather(acc) - assert.True(t, acc.HasMeasurement("internal_memstats")) + 
require.NoError(t, s.Gather(acc))
+	require.True(t, acc.HasMeasurement("internal_memstats"))
 
 	// test that a registered stat is incremented
 	stat := selfstat.Register("mytest", "test", map[string]string{"test": "foo"})
 	stat.Incr(1)
 	stat.Incr(2)
-	s.Gather(acc)
+	require.NoError(t, s.Gather(acc))
 
 	acc.AssertContainsTaggedFields(t, "internal_mytest",
 		map[string]interface{}{
 			"test": int64(3),
@@ -34,7 +34,7 @@ func TestSelfPlugin(t *testing.T) {
 
 	// test that a registered stat is set properly
 	stat.Set(101)
-	s.Gather(acc)
+	require.NoError(t, s.Gather(acc))
 
 	acc.AssertContainsTaggedFields(t, "internal_mytest",
 		map[string]interface{}{
 			"test": int64(101),
@@ -51,7 +51,7 @@ func TestSelfPlugin(t *testing.T) {
 	timing := selfstat.RegisterTiming("mytest", "test_ns", map[string]string{"test": "foo"})
 	timing.Incr(100)
 	timing.Incr(200)
-	s.Gather(acc)
+	require.NoError(t, s.Gather(acc))
 
 	acc.AssertContainsTaggedFields(t, "internal_mytest",
 		map[string]interface{}{
 			"test": int64(101),
diff --git a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md
new file mode 100644
index 0000000000000..f9a71446f4979
--- /dev/null
+++ b/plugins/inputs/internet_speed/README.md
@@ -0,0 +1,30 @@
+# Internet Speed Monitor
+
+The `Internet Speed Monitor` collects data about the internet speed on the system.
+
+## Configuration
+
+```toml
+# Monitors internet speed in the network
+[[inputs.internet_speed]]
+  ## Sets whether to run the file download test
+  ## Default: false
+  enable_file_download = false
+```
+
+## Metrics
+
+It collects latency, download speed, and upload speed.
+
+| Name           | Field Name | Type    | Unit |
+| -------------- | ---------- | ------- | ---- |
+| Download Speed | download   | float64 | Mbps |
+| Upload Speed   | upload     | float64 | Mbps |
+| Latency        | latency    | float64 | ms   |
+
+## Example Output
+
+```sh
+internet_speed,host=Sanyam-Ubuntu download=41.791,latency=28.518,upload=59.798 1631031183000000000
+```
\ No newline at end of file
diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go
new file mode 100644
index 0000000000000..58fb29c5949c1
--- /dev/null
+++ b/plugins/inputs/internet_speed/internet_speed.go
@@ -0,0 +1,87 @@
+package internet_speed
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/showwin/speedtest-go/speedtest"
+)
+
+// InternetSpeed is used to store configuration values.
+type InternetSpeed struct {
+	EnableFileDownload bool            `toml:"enable_file_download"`
+	Log                telegraf.Logger `toml:"-"`
+}
+
+const sampleConfig = `
+  ## Sets whether to run the file download test
+  ## Default: false
+  enable_file_download = false
+`
+
+// Description returns information about the plugin.
+func (is *InternetSpeed) Description() string {
+	return "Monitors internet speed using speedtest.net service"
+}
+
+// SampleConfig displays configuration instructions.
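The Gather implementation that follows leans entirely on speedtest-go. For orientation, here is a minimal standalone sketch of that flow outside Telegraf, assuming the same library API the diff imports (`github.com/showwin/speedtest-go/speedtest`); the server selection mirrors the plugin's choice of the first server in the list.

```go
// Standalone sketch of the speedtest-go flow wrapped by the plugin below.
// Error handling is shortened for brevity.
package main

import (
	"fmt"
	"log"

	"github.com/showwin/speedtest-go/speedtest"
)

func main() {
	user, err := speedtest.FetchUserInfo()
	if err != nil {
		log.Fatal(err)
	}
	servers, err := speedtest.FetchServerList(user)
	if err != nil {
		log.Fatal(err)
	}
	if len(servers.Servers) < 1 {
		log.Fatal("no servers found")
	}
	s := servers.Servers[0] // the plugin also just takes the first server

	// "false" corresponds to enable_file_download = false in the config.
	if err := s.PingTest(); err != nil {
		log.Fatal(err)
	}
	if err := s.DownloadTest(false); err != nil {
		log.Fatal(err)
	}
	if err := s.UploadTest(false); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("latency=%v download=%.3f Mbps upload=%.3f Mbps\n",
		s.Latency, s.DLSpeed, s.ULSpeed)
}
```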
+func (is *InternetSpeed) SampleConfig() string {
+	return sampleConfig
+}
+
+const measurement = "internet_speed"
+
+func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error {
+	user, err := speedtest.FetchUserInfo()
+	if err != nil {
+		return fmt.Errorf("fetching user info failed: %v", err)
+	}
+	serverList, err := speedtest.FetchServerList(user)
+	if err != nil {
+		return fmt.Errorf("fetching server list failed: %v", err)
+	}
+
+	if len(serverList.Servers) < 1 {
+		return fmt.Errorf("no servers found")
+	}
+	s := serverList.Servers[0]
+	is.Log.Debug("Starting Speed Test")
+	is.Log.Debug("Running Ping...")
+	err = s.PingTest()
+	if err != nil {
+		return fmt.Errorf("ping test failed: %v", err)
+	}
+	is.Log.Debug("Running Download...")
+	err = s.DownloadTest(is.EnableFileDownload)
+	if err != nil {
+		return fmt.Errorf("download test failed: %v", err)
+	}
+	is.Log.Debug("Running Upload...")
+	err = s.UploadTest(is.EnableFileDownload)
+	if err != nil {
+		return fmt.Errorf("upload test failed: %v", err)
+	}
+
+	is.Log.Debug("Test finished.")
+
+	fields := make(map[string]interface{})
+	fields["download"] = s.DLSpeed
+	fields["upload"] = s.ULSpeed
+	fields["latency"] = timeDurationMillisecondToFloat64(s.Latency)
+
+	tags := make(map[string]string)
+
+	acc.AddFields(measurement, fields, tags)
+	return nil
+}
+func init() {
+	inputs.Add("internet_speed", func() telegraf.Input {
+		return &InternetSpeed{}
+	})
+}
+
+func timeDurationMillisecondToFloat64(d time.Duration) float64 {
+	return float64(d) / float64(time.Millisecond)
+}
diff --git a/plugins/inputs/internet_speed/internet_speed_test.go b/plugins/inputs/internet_speed/internet_speed_test.go
new file mode 100644
index 0000000000000..669426ff683ad
--- /dev/null
+++ b/plugins/inputs/internet_speed/internet_speed_test.go
@@ -0,0 +1,44 @@
+package internet_speed
+
+import (
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGathering(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping network-dependent test in short mode.")
+	}
+	internetSpeed := &InternetSpeed{
+		EnableFileDownload: false,
+		Log:                testutil.Logger{},
+	}
+
+	acc := &testutil.Accumulator{}
+
+	require.NoError(t, internetSpeed.Gather(acc))
+}
+
+func TestDataGen(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping network-dependent test in short mode.")
+	}
+	internetSpeed := &InternetSpeed{
+		EnableFileDownload: false,
+		Log:                testutil.Logger{},
+	}
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, internetSpeed.Gather(acc))
+
+	metric, ok := acc.Get("internet_speed")
+	require.True(t, ok)
+
+	tags := metric.Tags
+
+	fields := metric.Fields
+
+	acc.AssertContainsTaggedFields(t, "internet_speed", fields, tags)
+}
diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go
index 39b3020ddbd39..d9e9dd287361c 100644
--- a/plugins/inputs/interrupts/interrupts.go
+++ b/plugins/inputs/interrupts/interrupts.go
@@ -13,7 +13,7 @@ import (
 )
 
 type Interrupts struct {
-	CpuAsTag bool `toml:"cpu_as_tag"`
+	CPUAsTag bool `toml:"cpu_as_tag"`
 }
 
 type IRQ struct {
@@ -57,7 +57,7 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) {
 	if scanner.Scan() {
 		cpus := strings.Fields(scanner.Text())
 		if cpus[0] != "CPU0" {
-			return nil, fmt.Errorf("Expected first line to start with CPU0, but was %s", scanner.Text())
+			return nil, fmt.Errorf("expected first line to start with CPU0, but was %s", scanner.Text())
 		}
 		cpucount = len(cpus)
 	}
@@ -93,7 +93,7 @@ scan:
 		irqs = append(irqs, *irq)
 	}
if scanner.Err() != nil { - return nil, fmt.Errorf("Error scanning file: %s", scanner.Err()) + return nil, fmt.Errorf("error scanning file: %s", scanner.Err()) } return irqs, nil } @@ -110,22 +110,30 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { func (s *Interrupts) Gather(acc telegraf.Accumulator) error { for measurement, file := range map[string]string{"interrupts": "/proc/interrupts", "soft_interrupts": "/proc/softirqs"} { - f, err := os.Open(file) + irqs, err := parseFile(file) if err != nil { - acc.AddError(fmt.Errorf("Could not open file: %s", file)) + acc.AddError(err) continue } - defer f.Close() - irqs, err := parseInterrupts(f) - if err != nil { - acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) - continue - } - reportMetrics(measurement, irqs, acc, s.CpuAsTag) + reportMetrics(measurement, irqs, acc, s.CPUAsTag) } return nil } +func parseFile(file string) ([]IRQ, error) { + f, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("could not open file: %s", file) + } + defer f.Close() + + irqs, err := parseInterrupts(f) + if err != nil { + return nil, fmt.Errorf("parsing %s: %s", file, err) + } + return irqs, nil +} + func reportMetrics(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpusAsTags bool) { for _, irq := range irqs { tags, fields := gatherTagsFields(irq) diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 63ff765b678dd..3ed0cd394cfdc 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -13,13 +13,13 @@ import ( // Setup and helper functions // ===================================================================================== -func expectCpuAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { +func expectCPUAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { for idx, value := range irq.Cpus { m.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"count": value}, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device, "cpu": fmt.Sprintf("cpu%d", idx)}) } } -func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { +func expectCPUAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { fields := map[string]interface{}{} total := int64(0) for idx, count := range irq.Cpus { @@ -70,7 +70,7 @@ func TestCpuAsTagsSoftIrqs(t *testing.T) { reportMetrics("soft_interrupts", irqs, acc, true) for _, irq := range softIrqsExpectedArgs { - expectCpuAsTags(acc, t, "soft_interrupts", irq) + expectCPUAsTags(acc, t, "soft_interrupts", irq) } } @@ -79,7 +79,7 @@ func TestCpuAsFieldsSoftIrqs(t *testing.T) { reportMetrics("soft_interrupts", irqs, acc, false) for _, irq := range softIrqsExpectedArgs { - expectCpuAsFields(acc, t, "soft_interrupts", irq) + expectCPUAsFields(acc, t, "soft_interrupts", irq) } } @@ -142,7 +142,7 @@ func TestCpuAsTagsHwIrqs(t *testing.T) { reportMetrics("interrupts", irqs, acc, true) for _, irq := range hwIrqsExpectedArgs { - expectCpuAsTags(acc, t, "interrupts", irq) + expectCPUAsTags(acc, t, "interrupts", irq) } } @@ -151,6 +151,6 @@ func TestCpuAsFieldsHwIrqs(t *testing.T) { reportMetrics("interrupts", irqs, acc, false) for _, irq := range hwIrqsExpectedArgs { - expectCpuAsFields(acc, t, "interrupts", irq) + expectCPUAsFields(acc, t, "interrupts", irq) } } diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 
0f9faa97f1f3d..609409985cb35 100644
--- a/plugins/inputs/ipmi_sensor/README.md
+++ b/plugins/inputs/ipmi_sensor/README.md
@@ -19,6 +19,11 @@ When one or more servers are specified, the plugin will use the following comman
 ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
 ```
 
+Any of the following parameters will be added to the aforementioned query if they are configured:
+```
+-y hex_key -L privilege
+```
+
 ### Configuration
 
 ```toml
@@ -53,6 +58,18 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
 
   ## Schema Version: (Optional, defaults to version 1)
   metric_version = 2
+
+  ## Optionally provide the hex key for the IPMI connection.
+  # hex_key = ""
+
+  ## If ipmitool should use a cache
+  ## ipmitool runs about 2 to 10 times faster with the cache enabled on HP G10 servers (tested on Ubuntu 20.04)
+  ## the cache file may not work well if some sensors come up late
+  # use_cache = false
+
+  ## Path to the ipmitool cache file (defaults to OS temp dir)
+  ## The provided path must exist and must be writable
+  # cache_path = ""
 ```
 
 ### Measurements
diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go
index 7f6a4c3594f61..b67ba06b9a619 100644
--- a/plugins/inputs/ipmi_sensor/connection.go
+++ b/plugins/inputs/ipmi_sensor/connection.go
@@ -15,11 +15,14 @@ type Connection struct {
 	Port      int
 	Interface string
 	Privilege string
+	HexKey    string
 }
 
-func NewConnection(server string, privilege string) *Connection {
-	conn := &Connection{}
-	conn.Privilege = privilege
+func NewConnection(server, privilege, hexKey string) *Connection {
+	conn := &Connection{
+		Privilege: privilege,
+		HexKey:    hexKey,
+	}
 
 	inx1 := strings.LastIndex(server, "@")
 	inx2 := strings.Index(server, "(")
@@ -29,8 +32,10 @@ func NewConnection(server string, privilege string) *Connection {
 		security := server[0:inx1]
 		connstr = server[inx1+1:]
 		up := strings.SplitN(security, ":", 2)
-		conn.Username = up[0]
-		conn.Password = up[1]
+		if len(up) == 2 {
+			conn.Username = up[0]
+			conn.Password = up[1]
+		}
 	}
 
 	if inx2 > 0 {
@@ -44,24 +49,27 @@ func NewConnection(server string, privilege string) *Connection {
 	return conn
 }
 
-func (t *Connection) options() []string {
-	intf := t.Interface
+func (c *Connection) options() []string {
+	intf := c.Interface
 	if intf == "" {
 		intf = "lan"
 	}
 
 	options := []string{
-		"-H", t.Hostname,
-		"-U", t.Username,
-		"-P", t.Password,
+		"-H", c.Hostname,
+		"-U", c.Username,
+		"-P", c.Password,
 		"-I", intf,
 	}
 
-	if t.Port != 0 {
-		options = append(options, "-p", strconv.Itoa(t.Port))
+	if c.HexKey != "" {
+		options = append(options, "-y", c.HexKey)
+	}
+	if c.Port != 0 {
+		options = append(options, "-p", strconv.Itoa(c.Port))
 	}
-	if t.Privilege != "" {
-		options = append(options, "-L", t.Privilege)
+	if c.Privilege != "" {
+		options = append(options, "-L", c.Privilege)
 	}
 	return options
 }
diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go
index 74944890f7a0c..3be902e3264bc 100644
--- a/plugins/inputs/ipmi_sensor/connection_test.go
+++ b/plugins/inputs/ipmi_sensor/connection_test.go
@@ -3,14 +3,9 @@ package ipmi_sensor
 import (
 	"testing"
 
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
-type conTest struct {
-	Got  string
-	Want *Connection
-}
-
 func TestNewConnection(t *testing.T) {
 	testData := []struct {
 		addr string
 		con  *Connection
 	}{
 		{
 			"USERID:PASSW0RD@lan(192.168.1.1)",
 			&Connection{
 				Hostname:  "192.168.1.1",
 				Username:  "USERID",
 				Password:  "PASSW0RD",
 				Interface: "lan",
 				Privilege: "USER",
+				HexKey:    "0001",
 			},
 		},
 		{
@@ -34,11 +30,58 @@ func
TestNewConnection(t *testing.T) {
 				Password:  "PASS:!@#$%^&*(234)_+W0RD",
 				Interface: "lan",
 				Privilege: "USER",
+				HexKey:    "0001",
+			},
+		},
+		// test connection doesn't panic if incorrect symbol used
+		{
+			"USERID@PASSW0RD@lan(192.168.1.1)",
+			&Connection{
+				Hostname:  "192.168.1.1",
+				Username:  "",
+				Password:  "",
+				Interface: "lan",
+				Privilege: "USER",
+				HexKey:    "0001",
 			},
 		},
 	}
 
 	for _, v := range testData {
-		assert.Equal(t, v.con, NewConnection(v.addr, "USER"))
+		require.EqualValues(t, v.con, NewConnection(v.addr, "USER", "0001"))
+	}
+}
+
+func TestGetCommandOptions(t *testing.T) {
+	testData := []struct {
+		connection *Connection
+		options    []string
+	}{
+		{
+			&Connection{
+				Hostname:  "192.168.1.1",
+				Username:  "user",
+				Password:  "password",
+				Interface: "lan",
+				Privilege: "USER",
+				HexKey:    "0001",
+			},
+			[]string{"-H", "192.168.1.1", "-U", "user", "-P", "password", "-I", "lan", "-y", "0001", "-L", "USER"},
+		},
+		{
+			&Connection{
+				Hostname:  "192.168.1.1",
+				Username:  "user",
+				Password:  "password",
+				Interface: "lan",
+				Privilege: "USER",
+				HexKey:    "",
+			},
+			[]string{"-H", "192.168.1.1", "-U", "user", "-P", "password", "-I", "lan", "-L", "USER"},
+		},
+	}
+
+	for _, data := range testData {
+		require.EqualValues(t, data.options, data.connection.options())
 	}
 }
diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go
index fb53e1bc746fe..801188130c960 100644
--- a/plugins/inputs/ipmi_sensor/ipmi.go
+++ b/plugins/inputs/ipmi_sensor/ipmi.go
@@ -4,8 +4,9 @@ import (
 	"bufio"
 	"bytes"
 	"fmt"
-	"log"
+	"os"
 	"os/exec"
+	"path/filepath"
 	"regexp"
 	"strconv"
 	"strings"
@@ -13,26 +14,32 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
 var (
-	execCommand              = exec.Command // execCommand is used to mock commands in tests.
-	re_v1_parse_line         = regexp.MustCompile(`^(?P<name>[^|]*)\|(?P<description>[^|]*)\|(?P<status_code>.*)`)
-	re_v2_parse_line         = regexp.MustCompile(`^(?P<name>[^|]*)\|[^|]+\|(?P<status_code>[^|]*)\|(?P<entity_id>[^|]*)\|(?:(?P<description>[^|]+))?`)
-	re_v2_parse_description  = regexp.MustCompile(`^(?P<analogValue>-?[0-9.]+)\s(?P<analogUnit>.*)|(?P<status>.+)|^$`)
-	re_v2_parse_unit         = regexp.MustCompile(`^(?P<realAnalogUnit>[^,]+)(?:,\s*(?P<statusDesc>.*))?`)
+	execCommand          = exec.Command // execCommand is used to mock commands in tests.
+	reV1ParseLine        = regexp.MustCompile(`^(?P<name>[^|]*)\|(?P<description>[^|]*)\|(?P<status_code>.*)`)
+	reV2ParseLine        = regexp.MustCompile(`^(?P<name>[^|]*)\|[^|]+\|(?P<status_code>[^|]*)\|(?P<entity_id>[^|]*)\|(?:(?P<description>[^|]+))?`)
+	reV2ParseDescription = regexp.MustCompile(`^(?P<analogValue>-?[0-9.]+)\s(?P<analogUnit>.*)|(?P<status>.+)|^$`)
+	reV2ParseUnit        = regexp.MustCompile(`^(?P<realAnalogUnit>[^,]+)(?:,\s*(?P<statusDesc>.*))?`)
 )
 
 // Ipmi stores the configuration values for the ipmi_sensor input plugin
 type Ipmi struct {
 	Path          string
 	Privilege     string
+	HexKey        string `toml:"hex_key"`
 	Servers       []string
-	Timeout       internal.Duration
+	Timeout       config.Duration
 	MetricVersion int
 	UseSudo       bool
+	UseCache      bool
+	CachePath     string
+
+	Log telegraf.Logger `toml:"-"`
 }
 
 var sampleConfig = `
@@ -65,6 +72,18 @@ var sampleConfig = `
 
   ## Schema Version: (Optional, defaults to version 1)
   metric_version = 2
+
+  ## Optionally provide the hex key for the IPMI connection.
+  # hex_key = ""
+
+  ## If ipmitool should use a cache
+  ## ipmitool runs about 2 to 10 times faster with the cache enabled on HP G10 servers (tested on Ubuntu 20.04)
+  ## the cache file may not work well if some sensors come up late
+  # use_cache = false
+
+  ## Path to the ipmitool cache file (defaults to OS temp dir)
+  ## The provided path must exist and must be writable
+  # cache_path = ""
 `
 
 // SampleConfig returns the documentation about the sample configuration
@@ -110,11 +129,34 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
 	opts := make([]string, 0)
 	hostname := ""
 	if server != "" {
-		conn := NewConnection(server, m.Privilege)
+		conn := NewConnection(server, m.Privilege, m.HexKey)
 		hostname = conn.Hostname
 		opts = conn.options()
 	}
 	opts = append(opts, "sdr")
+	if m.UseCache {
+		cacheFile := filepath.Join(m.CachePath, server+"_ipmi_cache")
+		_, err := os.Stat(cacheFile)
+		if os.IsNotExist(err) {
+			dumpOpts := opts
+			// init cache file
+			dumpOpts = append(dumpOpts, "dump")
+			dumpOpts = append(dumpOpts, cacheFile)
+			name := m.Path
+			if m.UseSudo {
+				// -n - avoid prompting the user for input of any kind
+				dumpOpts = append([]string{"-n", name}, dumpOpts...)
+				name = "sudo"
+			}
+			cmd := execCommand(name, dumpOpts...)
+			out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout))
+			if err != nil {
+				return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
+			}
+		}
+		opts = append(opts, "-S")
+		opts = append(opts, cacheFile)
+	}
 	if m.MetricVersion == 2 {
 		opts = append(opts, "elist")
 	}
@@ -125,23 +167,23 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
 		name = "sudo"
 	}
 	cmd := execCommand(name, opts...)
-	out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration)
+	out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout))
 	timestamp := time.Now()
 	if err != nil {
-		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
 	}
 	if m.MetricVersion == 2 {
-		return parseV2(acc, hostname, out, timestamp)
+		return m.parseV2(acc, hostname, out, timestamp)
 	}
-	return parseV1(acc, hostname, out, timestamp)
+	return m.parseV1(acc, hostname, out, timestamp)
 }
 
-func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error {
+func (m *Ipmi) parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error {
 	// each line will look something like
 	// Planar VBAT      | 3.05 Volts        | ok
 	scanner := bufio.NewScanner(bytes.NewReader(cmdOut))
 	for scanner.Scan() {
-		ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text())
+		ipmiFields := m.extractFieldsFromRegex(reV1ParseLine, scanner.Text())
 		if len(ipmiFields) != 3 {
 			continue
 		}
@@ -187,20 +229,20 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_
 			fields["value"] = 0.0
 		}
 
-		acc.AddFields("ipmi_sensor", fields, tags, measured_at)
+		acc.AddFields("ipmi_sensor", fields, tags, measuredAt)
 	}
 
 	return scanner.Err()
}
 
-func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error {
+func (m *Ipmi) parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error {
 	// each line will look something like
 	// CMOS Battery     | 65h | ok  |  7.1 |
 	// Temp             | 0Eh | ok  |  3.1 | 55 degrees C
 	// Drive 0          | A0h
| ok | 7.1 | Drive Present scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV2ParseLine, scanner.Text()) if len(ipmiFields) < 3 || len(ipmiFields) > 4 { continue } @@ -216,7 +258,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ tags["entity_id"] = transform(ipmiFields["entity_id"]) tags["status_code"] = trim(ipmiFields["status_code"]) fields := make(map[string]interface{}) - descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"])) + descriptionResults := m.extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) // This is an analog value with a unit if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { var err error @@ -225,7 +267,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ continue } // Some implementations add an extra status to their analog units - unitResults := extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"]) + unitResults := m.extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) tags["unit"] = transform(unitResults["realAnalogUnit"]) if unitResults["statusDesc"] != "" { tags["status_desc"] = transform(unitResults["statusDesc"]) @@ -241,19 +283,19 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ } } - acc.AddFields("ipmi_sensor", fields, tags, measured_at) + acc.AddFields("ipmi_sensor", fields, tags, measuredAt) } return scanner.Err() } // extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of strings with the results -func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { +func (m *Ipmi) extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { submatches := re.FindStringSubmatch(input) results := make(map[string]string) subexpNames := re.SubexpNames() if len(subexpNames) > len(submatches) { - log.Printf("D! 
No matches found in '%s'", input) + m.Log.Debugf("No matches found in '%s'", input) return results } for i, name := range subexpNames { @@ -273,6 +315,16 @@ func aToFloat(val string) (float64, error) { return f, nil } +func sanitizeIPMICmd(args []string) []string { + for i, v := range args { + if v == "-P" { + args[i+1] = "REDACTED" + } + } + + return args +} + func trim(s string) string { return strings.TrimSpace(s) } @@ -289,7 +341,9 @@ func init() { if len(path) > 0 { m.Path = path } - m.Timeout = internal.Duration{Duration: time.Second * 20} + m.Timeout = config.Duration(time.Second * 20) + m.UseCache = false + m.CachePath = os.TempDir() inputs.Add("ipmi_sensor", func() telegraf.Input { m := m return &m diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index bd5e02c196e76..504a7467f5130 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -7,11 +7,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -19,8 +19,11 @@ func TestGather(t *testing.T) { Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, Path: "ipmitool", Privilege: "USER", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + HexKey: "1234567F", + Log: testutil.Logger{}, } + // overwriting exec commands with mock commands execCommand = fakeExecCommand var acc testutil.Accumulator @@ -29,11 +32,12 @@ func TestGather(t *testing.T) { require.NoError(t, err) - assert.Equal(t, acc.NFields(), 262, "non-numeric measurements should be ignored") + require.EqualValues(t, acc.NFields(), 262, "non-numeric measurements should be ignored") - conn := NewConnection(i.Servers[0], i.Privilege) - assert.Equal(t, "USERID", conn.Username) - assert.Equal(t, "lan", conn.Interface) + conn := NewConnection(i.Servers[0], i.Privilege, i.HexKey) + require.EqualValues(t, "USERID", conn.Username) + require.EqualValues(t, "lan", conn.Interface) + require.EqualValues(t, "1234567F", conn.HexKey) var testsWithServer = []struct { fields map[string]interface{} @@ -42,7 +46,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -53,7 +57,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -64,7 +68,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -75,7 +79,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -86,7 +90,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -97,7 +101,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -108,7 +112,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": 
int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -124,7 +128,8 @@ func TestGather(t *testing.T) { i = &Ipmi{ Path: "ipmitool", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + Log: testutil.Logger{}, } err = acc.GatherError(i.Gather) @@ -137,7 +142,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -147,7 +152,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -157,7 +162,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -167,7 +172,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -177,7 +182,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -187,7 +192,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -197,7 +202,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -225,7 +230,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. 
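The helper-process trick referenced in this comment is easy to miss: `execCommand` is a package-level variable, and the test swaps it for a fake that re-invokes the test binary so that `TestHelperProcess` can print canned ipmitool output. The diff elides the fake's body, so the following is a sketch of the standard shape of the pattern, not the exact implementation in this patch:

```go
// Sketch of the exec-mocking pattern used by these tests.
package mocktest

import (
	"fmt"
	"os"
	"os/exec"
	"testing"
)

// execCommand is assumed to be a package-level variable (as in ipmi.go)
// that production code calls instead of exec.Command directly.
var execCommand = exec.Command

// fakeExecCommand re-runs the current test binary, restricted to
// TestHelperProcess, instead of launching the real external command.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := []string{"-test.run=TestHelperProcess", "--", command}
	cs = append(cs, args...)
	cmd := exec.Command(os.Args[0], cs...)
	// The env var tells the helper it is running as a mock, not as a test.
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}

// TestHelperProcess isn't a real test: when re-invoked via fakeExecCommand
// it prints canned output and exits, playing the role of ipmitool.
func TestHelperProcess(_ *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	fmt.Fprint(os.Stdout, "Planar VBAT      | 3.05 Volts        | ok")
	os.Exit(0)
}
```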
-func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -369,15 +374,19 @@ OS RealTime Mod | 0x00 | ok // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] + // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) - } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -386,8 +395,10 @@ func TestGatherV2(t *testing.T) { Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, Path: "ipmitool", Privilege: "USER", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), MetricVersion: 2, + HexKey: "0000000F", + Log: testutil.Logger{}, } // overwriting exec commands with mock commands execCommand = fakeExecCommandV2 @@ -397,9 +408,10 @@ func TestGatherV2(t *testing.T) { require.NoError(t, err) - conn := NewConnection(i.Servers[0], i.Privilege) - assert.Equal(t, "USERID", conn.Username) - assert.Equal(t, "lan", conn.Interface) + conn := NewConnection(i.Servers[0], i.Privilege, i.HexKey) + require.EqualValues(t, "USERID", conn.Username) + require.EqualValues(t, "lan", conn.Interface) + require.EqualValues(t, "0000000F", conn.HexKey) var testsWithServer = []struct { fields map[string]interface{} @@ -426,8 +438,9 @@ func TestGatherV2(t *testing.T) { i = &Ipmi{ Path: "ipmitool", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), MetricVersion: 2, + Log: testutil.Logger{}, } err = acc.GatherError(i.Gather) @@ -543,7 +556,7 @@ func fakeExecCommandV2(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- chrony tracking // it returns below mockData. 
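Since the redaction helper is central to the error-path changes above, a small standalone demonstration may help. `sanitizeIPMICmd` is copied verbatim from the diff; the surrounding `main` is illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// Copied from the diff: redacts the value following the -P flag so
// passwords never appear in error messages or logs.
func sanitizeIPMICmd(args []string) []string {
	for i, v := range args {
		if v == "-P" {
			args[i+1] = "REDACTED"
		}
	}

	return args
}

func main() {
	args := []string{"-H", "192.168.1.1", "-U", "USERID", "-P", "PASSW0RD", "-I", "lan", "sdr"}
	// Matches the usage in ipmi.go's error path.
	fmt.Println(strings.Join(sanitizeIPMICmd(args), " "))
	// Output: -H 192.168.1.1 -U USERID -P REDACTED -I lan sdr
}
```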
-func TestHelperProcessV2(t *testing.T) { +func TestHelperProcessV2(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -562,15 +575,19 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] + // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) - } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -605,10 +622,14 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected v2Data, } + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for i := range tests { t.Logf("Checking v%d data...", i+1) - extractFieldsFromRegex(re_v1_parse_line, tests[i]) - extractFieldsFromRegex(re_v2_parse_line, tests[i]) + ipmi.extractFieldsFromRegex(reV1ParseLine, tests[i]) + ipmi.extractFieldsFromRegex(reV2ParseLine, tests[i]) } } @@ -645,11 +666,16 @@ func Test_parseV1(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr) } @@ -738,13 +764,66 @@ func Test_parseV2(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } + +func TestSanitizeIPMICmd(t *testing.T) { + tests := []struct { + name string + args []string + expected []string + }{ + { + name: "default args", + args: []string{ + "-H", "localhost", + "-U", "username", + "-P", "password", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-P", "REDACTED", + "-I", "lan", + }, + }, + { + name: "no password", + args: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + }, + { + name: "empty args", + args: []string{}, + expected: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var sanitizedArgs []string = sanitizeIPMICmd(tt.args) + require.Equal(t, tt.expected, sanitizedArgs) + }) + } +} diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index c459ebf4cfe26..82854a35f44f3 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) 
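The `internal.Duration` to `config.Duration` migration seen here repeats across nearly every plugin in this patch. A compact sketch of the before/after shape, assuming `config.Duration` is a `time.Duration`-based type as its usage throughout the diff implies:

```go
package example

import (
	"os/exec"
	"time"

	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/internal"
)

// Before: timeouts were declared as internal.Duration, a struct wrapper:
//   Timeout internal.Duration
//   defaultTimeout = internal.Duration{Duration: time.Second}
//   internal.RunTimeout(cmd, timeout.Duration)

// After: config.Duration converts with a plain cast, so the struct
// construction and .Duration field access both disappear.
var defaultTimeout = config.Duration(time.Second)

func run(cmd *exec.Cmd, timeout config.Duration) error {
	return internal.RunTimeout(cmd, time.Duration(timeout))
}
```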
@@ -18,23 +19,23 @@ import ( type Ipset struct { IncludeUnmatchedSets bool UseSudo bool - Timeout internal.Duration + Timeout config.Duration lister setLister } -type setLister func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type setLister func(Timeout config.Duration, UseSudo bool) (*bytes.Buffer, error) const measurement = "ipset" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) // Description returns a short description of the plugin -func (ipset *Ipset) Description() string { +func (i *Ipset) Description() string { return "Gather packets and bytes counters from Linux ipsets" } // SampleConfig returns sample configuration options. -func (ipset *Ipset) SampleConfig() string { +func (i *Ipset) SampleConfig() string { return ` ## By default, we only show sets which have already matched at least 1 packet. ## set include_unmatched_sets = true to gather them all. @@ -46,8 +47,8 @@ func (ipset *Ipset) SampleConfig() string { ` } -func (ips *Ipset) Gather(acc telegraf.Accumulator) error { - out, e := ips.lister(ips.Timeout, ips.UseSudo) +func (i *Ipset) Gather(acc telegraf.Accumulator) error { + out, e := i.lister(i.Timeout, i.UseSudo) if e != nil { acc.AddError(e) } @@ -64,25 +65,25 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error { data := strings.Fields(line) if len(data) < 7 { - acc.AddError(fmt.Errorf("Error parsing line (expected at least 7 fields): %s", line)) + acc.AddError(fmt.Errorf("error parsing line (expected at least 7 fields): %s", line)) continue } - if data[0] == "add" && (data[4] != "0" || ips.IncludeUnmatchedSets) { + if data[0] == "add" && (data[4] != "0" || i.IncludeUnmatchedSets) { tags := map[string]string{ "set": data[1], "rule": data[2], } - packets_total, err := strconv.ParseUint(data[4], 10, 64) + packetsTotal, err := strconv.ParseUint(data[4], 10, 64) if err != nil { acc.AddError(err) } - bytes_total, err := strconv.ParseUint(data[6], 10, 64) + bytesTotal, err := strconv.ParseUint(data[6], 10, 64) if err != nil { acc.AddError(err) } fields := map[string]interface{}{ - "packets_total": packets_total, - "bytes_total": bytes_total, + "packets_total": packetsTotal, + "bytes_total": bytesTotal, } acc.AddCounter(measurement, fields, tags) } @@ -90,7 +91,7 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error { return nil } -func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func setList(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { // Is ipset installed ? 
ipsetPath, err := exec.LookPath("ipset") if err != nil { @@ -98,7 +99,7 @@ func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { } var args []string cmdName := ipsetPath - if UseSudo { + if useSudo { cmdName = "sudo" args = append(args, ipsetPath) } @@ -108,7 +109,7 @@ func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { var out bytes.Buffer cmd.Stdout = &out - err = internal.RunTimeout(cmd, Timeout.Duration) + err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running ipset save: %s", err) } diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go index 31a9f3cfc113d..f205728c0dbad 100644 --- a/plugins/inputs/ipset/ipset_test.go +++ b/plugins/inputs/ipset/ipset_test.go @@ -7,7 +7,7 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -40,7 +40,7 @@ func TestIpset(t *testing.T) { value: `create hash:net family inet hashsize 1024 maxelem 65536 counters add myset 4.5.6.7 packets 123 bytes `, - err: fmt.Errorf("Error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"), + err: fmt.Errorf("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"), }, { name: "Non-empty sets, counters, no comment", @@ -80,7 +80,7 @@ func TestIpset(t *testing.T) { t.Run(tt.name, func(t *testing.T) { i++ ips := &Ipset{ - lister: func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { + lister: func(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.value), nil }, } @@ -123,7 +123,7 @@ func TestIpset(t *testing.T) { func TestIpset_Gather_listerError(t *testing.T) { errFoo := errors.New("error foobar") ips := &Ipset{ - lister: func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { + lister: func(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { return new(bytes.Buffer), errFoo }, } diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index e56f8b31d5725..89924b88de7c8 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/plugins/inputs/iptables/iptables_nocompile.go b/plugins/inputs/iptables/iptables_nocompile.go index f71b4208e62fb..17c0eaced90e5 100644 --- a/plugins/inputs/iptables/iptables_nocompile.go +++ b/plugins/inputs/iptables/iptables_nocompile.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package iptables diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index 681d8bbfc130e..4c62ef6d6a86a 100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 5e3ae0d5637b0..7dea5240aab0f 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ipvs @@ -8,10 +9,10 @@ import ( "strconv" "syscall" - "github.com/docker/libnetwork/ipvs" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/logrus" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/moby/ipvs" ) // IPVS holds the state for this input plugin diff --git 
a/plugins/inputs/ipvs/ipvs_notlinux.go b/plugins/inputs/ipvs/ipvs_notlinux.go index bbbb1240b62a8..b46035f2c2b3c 100644 --- a/plugins/inputs/ipvs/ipvs_notlinux.go +++ b/plugins/inputs/ipvs/ipvs_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ipvs diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index f4e9f94ac22a7..e12326031b9ef 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -39,11 +39,16 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ## empty will use default value 10 # max_subjob_per_layer = 10 - ## Jobs to exclude from gathering - # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + ## Jobs to include or exclude from gathering + ## When using both lists, job_exclude has priority. + ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] + # job_include = [ "*" ] + # job_exclude = [ ] - ## Nodes to exclude from gathering - # node_exclude = [ "node1", "node2" ] + ## Nodes to include or exclude from gathering + ## When using both lists, node_exclude has priority. + # node_include = [ "*" ] + # node_exclude = [ ] ## Worker pool for jenkins plugin only ## Empty this field will use default value 5 @@ -52,7 +57,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ### Metrics: -- jenkins_node +- jenkins - tags: - source - port @@ -88,6 +93,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - port - fields: - duration (ms) + - number - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) ### Sample Queries: diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go index 6c0a125aaaf56..00c9bb54251f4 100644 --- a/plugins/inputs/jenkins/client.go +++ b/plugins/inputs/jenkins/client.go @@ -47,11 +47,9 @@ func (c *client) init() error { break } } + // first api fetch - if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil { - return err - } - return nil + return c.doGet(context.Background(), jobPath, new(jobResponse)) } func (c *client) doGet(ctx context.Context, url string, v interface{}) error { @@ -71,6 +69,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error { return err } defer func() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive resp.Body.Close() <-c.semaphore }() @@ -97,10 +97,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error { Title: resp.Status, } } - if err = json.NewDecoder(resp.Body).Decode(v); err != nil { - return err - } - return nil + + return json.NewDecoder(resp.Body).Decode(v) } type APIError struct { diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index a2d3e3500bc30..9543c3ab17b87 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -11,8 +11,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -25,21 +25,23 @@ type Jenkins struct { Source string Port string // HTTP Timeout specified as a string - 3s, 1m, 1h - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *client Log telegraf.Logger - MaxConnections int `toml:"max_connections"` - 
MaxBuildAge internal.Duration `toml:"max_build_age"` - MaxSubJobDepth int `toml:"max_subjob_depth"` - MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` - JobExclude []string `toml:"job_exclude"` + MaxConnections int `toml:"max_connections"` + MaxBuildAge config.Duration `toml:"max_build_age"` + MaxSubJobDepth int `toml:"max_subjob_depth"` + MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` + JobExclude []string `toml:"job_exclude"` + JobInclude []string `toml:"job_include"` jobFilter filter.Filter NodeExclude []string `toml:"node_exclude"` + NodeInclude []string `toml:"node_include"` nodeFilter filter.Filter semaphore chan struct{} @@ -77,11 +79,16 @@ const sampleConfig = ` ## empty will use default value 10 # max_subjob_per_layer = 10 - ## Jobs to exclude from gathering - # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + ## Jobs to include or exclude from gathering + ## When using both lists, job_exclude has priority. + ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] + # job_include = [ "*" ] + # job_exclude = [ ] - ## Nodes to exclude from gathering - # node_exclude = [ "node1", "node2" ] + ## Nodes to include or exclude from gathering + ## When using both lists, node_exclude has priority. + # node_include = [ "*" ] + # node_exclude = [ ] ## Worker pool for jenkins plugin only ## Empty this field will use default value 5 @@ -133,7 +140,7 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) { TLSClientConfig: tlsCfg, MaxIdleConns: j.MaxConnections, }, - Timeout: j.ResponseTimeout.Duration, + Timeout: time.Duration(j.ResponseTimeout), }, nil } @@ -157,16 +164,14 @@ func (j *Jenkins) initialize(client *http.Client) error { } j.Source = u.Hostname() - // init job filter - j.jobFilter, err = filter.Compile(j.JobExclude) + // init filters + j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude) if err != nil { - return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + return fmt.Errorf("error compiling job filters[%s]: %v", j.URL, err) } - - // init node filter - j.nodeFilter, err = filter.Compile(j.NodeExclude) + j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude) if err != nil { - return fmt.Errorf("error compile node filters[%s]: %v", j.URL, err) + return fmt.Errorf("error compiling node filters[%s]: %v", j.URL, err) } // init tcp pool with default value @@ -187,15 +192,15 @@ func (j *Jenkins) initialize(client *http.Client) error { } func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { - tags := map[string]string{} if n.DisplayName == "" { return fmt.Errorf("error empty node name") } tags["node_name"] = n.DisplayName - // filter out excluded node_name - if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) { + + // filter out excluded or not included node_name + if !j.nodeFilter.Match(tags["node_name"]) { return nil } @@ -239,7 +244,6 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { } func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { - nodeResp, err := j.client.getAllNodes(context.Background()) if err != nil { acc.AddError(err) @@ -287,24 +291,13 @@ func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) { wg.Wait() } -// wrap the tcp request with doGet -// block tcp request if buffered channel is full -func (j *Jenkins) doGet(tcp func() error) error { - j.semaphore <- struct{}{} - if err := tcp(); err != nil { - <-j.semaphore - return err - } - <-j.semaphore - return nil -} - func (j *Jenkins) getJobDetail(jr jobRequest, 
acc telegraf.Accumulator) error { if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { return nil } - // filter out excluded job. - if j.jobFilter != nil && j.jobFilter.Match(jr.hierarchyName()) { + + // filter out excluded or not included jobs + if !j.jobFilter.Match(jr.hierarchyName()) { return nil } @@ -351,7 +344,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { // stop if build is too old // Higher up in gatherJobs - cutoff := time.Now().Add(-1 * j.MaxBuildAge.Duration) + cutoff := time.Now().Add(-1 * time.Duration(j.MaxBuildAge)) // Here we just test if build.GetTimestamp().Before(cutoff) { @@ -419,12 +412,13 @@ type jobBuild struct { type buildResponse struct { Building bool `json:"building"` Duration int64 `json:"duration"` + Number int64 `json:"number"` Result string `json:"result"` Timestamp int64 `json:"timestamp"` } func (b *buildResponse) GetTimestamp() time.Time { - return time.Unix(0, int64(b.Timestamp)*int64(time.Millisecond)) + return time.Unix(0, b.Timestamp*int64(time.Millisecond)) } const ( @@ -473,6 +467,7 @@ func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.A fields := make(map[string]interface{}) fields["duration"] = b.Duration fields["result_code"] = mapResultCode(b.Result) + fields["number"] = b.Number acc.AddFields(measurementJob, fields, tags, b.GetTimestamp()) } @@ -497,7 +492,7 @@ func mapResultCode(s string) int { func init() { inputs.Add("jenkins", func() telegraf.Input { return &Jenkins{ - MaxBuildAge: internal.Duration{Duration: time.Duration(time.Hour)}, + MaxBuildAge: config.Duration(time.Hour), MaxConnections: 5, MaxSubJobPerLayer: 10, } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index be899476d8595..e5f09ad66d1ca 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -44,13 +44,13 @@ func TestJobRequest(t *testing.T) { } for _, test := range tests { hierarchyName := test.input.hierarchyName() - URL := test.input.URL() + address := test.input.URL() if hierarchyName != test.hierarchyName { t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName) } - if test.URL != "" && URL != test.URL { - t.Errorf("Expected %s, got %s\n", test.URL, URL) + if test.URL != "" && address != test.URL { + t.Errorf("Expected %s, got %s\n", test.URL, address) } } } @@ -97,6 +97,8 @@ func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) return } + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive w.Write(b) } @@ -154,7 +156,7 @@ func TestGatherNodeData(t *testing.T) { }, }, { - name: "filtered nodes", + name: "filtered nodes (excluded)", input: mockHandler{ responseMap: map[string]interface{}{ "/api/json": struct{}{}, @@ -182,6 +184,35 @@ func TestGatherNodeData(t *testing.T) { }, }, }, + { + name: "filtered nodes (included)", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, + Computers: []node{ + {DisplayName: "filtered-1"}, + {DisplayName: "filtered-1"}, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ 
+ "busy_executors": 4, + "total_executors": 8, + }, + }, + }, + }, + }, { name: "normal data collection", input: mockHandler{ @@ -302,8 +333,9 @@ func TestGatherNodeData(t *testing.T) { j := &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), NodeExclude: []string{"ignore-1", "ignore-2"}, + NodeInclude: []string{"master", "slave"}, } te := j.initialize(&http.Client{Transport: &http.Transport{}}) acc := new(testutil.Accumulator) @@ -358,7 +390,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: "http://a bad url", - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), }, wantErr: true, }, @@ -367,7 +399,8 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), + JobInclude: []string{"jobA", "jobB"}, JobExclude: []string{"job1", "job2"}, NodeExclude: []string{"node1", "node2"}, }, @@ -377,7 +410,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), }, output: &Jenkins{ Log: testutil.Logger{}, @@ -396,7 +429,7 @@ func TestInitialize(t *testing.T) { } if test.output != nil { if test.input.client == nil { - t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + t.Fatalf("%s: failed %v, jenkins instance shouldn't be nil", test.name, te) } if test.input.MaxConnections != test.output.MaxConnections { t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) @@ -530,12 +563,14 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "SUCCESS", Duration: 25558, + Number: 3, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/job2/1/api/json": &buildResponse{ Building: false, Result: "FAILURE", Duration: 1558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -549,6 +584,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(25558), + "number": int64(3), "result_code": 0, }, }, @@ -559,6 +595,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(1558), + "number": int64(1), "result_code": 1, }, }, @@ -583,6 +620,7 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "SUCCESS", Duration: 25558, + Number: 3, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -596,6 +634,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(25558), + "number": int64(3), "result_code": 0, }, }, @@ -711,24 +750,28 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "FAILURE", Duration: 1558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 76558, + Number: 4, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 91558, + Number: 1, Timestamp: (time.Now().Unix() - 
int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 87832, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -743,6 +786,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(87832), + "number": int64(1), "result_code": 0, }, }, @@ -754,6 +798,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(91558), + "number": int64(1), "result_code": 0, }, }, @@ -765,6 +810,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(76558), + "number": int64(4), "result_code": 0, }, }, @@ -776,6 +822,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(1558), + "number": int64(1), "result_code": 1, }, }, @@ -790,8 +837,11 @@ func TestGatherJobs(t *testing.T) { j := &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - MaxBuildAge: internal.Duration{Duration: time.Hour}, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + MaxBuildAge: config.Duration(time.Hour), + ResponseTimeout: config.Duration(time.Microsecond), + JobInclude: []string{ + "*", + }, JobExclude: []string{ "ignore-1", "apps/ignore-all/*", @@ -828,7 +878,6 @@ func TestGatherJobs(t *testing.T) { } } } - } }) } diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 96ee48701b464..9f2a658f16247 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,6 +1,6 @@ # Jolokia Input Plugin -**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. +### Deprecated in version 1.5: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. 
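The change that recurs throughout this patch, in the jenkins hunks above and the jolokia hunks below, is the migration from the internal.Duration wrapper struct to the config.Duration named type. The sketch below shows the new pattern under the assumption, implied by the conversions in this diff, that config.Duration is a defined type over time.Duration; the settings struct and its field names are hypothetical, not part of the patch:

    package example

    import (
    	"time"

    	"github.com/influxdata/telegraf/config"
    )

    // settings is a hypothetical plugin configuration block. config.Duration
    // fields decode directly from TOML strings such as "1h" or "500ms".
    type settings struct {
    	MaxBuildAge     config.Duration `toml:"max_build_age"`
    	ResponseTimeout config.Duration `toml:"response_timeout"`
    }

    // cutoff recovers a stdlib time.Duration with a plain type conversion,
    // replacing the old field access s.MaxBuildAge.Duration.
    func (s settings) cutoff() time.Time {
    	return time.Now().Add(-1 * time.Duration(s.MaxBuildAge))
    }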
#### Configuration diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 317a47efbd115..af5e3de283800 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -4,20 +4,19 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" - "log" + "io" "net/http" "net/url" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Default http timeouts -var DefaultResponseHeaderTimeout = internal.Duration{Duration: 3 * time.Second} -var DefaultClientTimeout = internal.Duration{Duration: 4 * time.Second} +var DefaultResponseHeaderTimeout = config.Duration(3 * time.Second) +var DefaultClientTimeout = config.Duration(4 * time.Second) type Server struct { Name string @@ -55,8 +54,9 @@ type Jolokia struct { Proxy Server Delimiter string - ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"` - ClientTimeout internal.Duration `toml:"client_timeout"` + ResponseHeaderTimeout config.Duration `toml:"response_header_timeout"` + ClientTimeout config.Duration `toml:"client_timeout"` + Log telegraf.Logger `toml:"-"` } const sampleConfig = ` @@ -143,7 +143,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", req.RequestURI, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -153,22 +153,22 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } // Unmarshal json var jsonOut []map[string]interface{} - if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { - return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, body) + if err = json.Unmarshal(body, &jsonOut); err != nil { + return nil, fmt.Errorf("error decoding JSON response: %s: %s", err, body) } return jsonOut, nil } func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request, error) { - var jolokiaUrl *url.URL + var jolokiaURL *url.URL context := j.Context // Usually "/jolokia/" var bulkBodyContent []map[string]interface{} @@ -188,11 +188,11 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request // Add target, only in proxy mode if j.Mode == "proxy" { - serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", + serviceURL := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", server.Host, server.Port) target := map[string]string{ - "url": serviceUrl, + "url": serviceURL, } if server.Username != "" { @@ -208,26 +208,25 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request proxy := j.Proxy // Prepare ProxyURL - proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context) + proxyURL, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context) if err != nil { return nil, err } if proxy.Username != "" || proxy.Password != "" { - proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password) + proxyURL.User = url.UserPassword(proxy.Username, proxy.Password) } - jolokiaUrl = proxyUrl - + jolokiaURL = proxyURL } else { - serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + 
context) + serverURL, err := url.Parse("http://" + server.Host + ":" + server.Port + context) if err != nil { return nil, err } if server.Username != "" || server.Password != "" { - serverUrl.User = url.UserPassword(server.Username, server.Password) + serverURL.User = url.UserPassword(server.Username, server.Password) } - jolokiaUrl = serverUrl + jolokiaURL = serverURL } bulkBodyContent = append(bulkBodyContent, bodyContent) @@ -238,7 +237,7 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request return nil, err } - req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody)) + req, err := http.NewRequest("POST", jolokiaURL.String(), bytes.NewBuffer(requestBody)) if err != nil { return nil, err } @@ -259,16 +258,15 @@ func (j *Jolokia) extractValues(measurement string, value interface{}, fields ma } func (j *Jolokia) Gather(acc telegraf.Accumulator) error { - if j.jClient == nil { - log.Println("W! DEPRECATED: the jolokia plugin has been deprecated " + + j.Log.Warn("DEPRECATED: the jolokia plugin has been deprecated " + "in favor of the jolokia2 plugin " + "(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2)") - tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(j.ResponseHeaderTimeout)} j.jClient = &JolokiaClientImpl{&http.Client{ Transport: tr, - Timeout: j.ClientTimeout.Duration, + Timeout: time.Duration(j.ClientTimeout), }} } @@ -299,18 +297,18 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { } for i, resp := range out { if status, ok := resp["status"]; ok && status != float64(200) { - acc.AddError(fmt.Errorf("Not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", + acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status)) continue } else if !ok { - acc.AddError(fmt.Errorf("Missing status in response body")) + acc.AddError(fmt.Errorf("missing status in response body")) continue } if values, ok := resp["value"]; ok { j.extractValues(metrics[i].Name, values, fields) } else { - acc.AddError(fmt.Errorf("Missing key 'value' in output response\n")) + acc.AddError(fmt.Errorf("missing key 'value' in output response")) } } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index a1ca60604cf00..e91e9a1087fda 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -2,7 +2,7 @@ package jolokia import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -98,25 +98,8 @@ const validMultiValueJSON = ` } ]` -const validSingleValueJSON = ` -[ - { - "request":{ - "path":"used", - "mbean":"java.lang:type=Memory", - "attribute":"HeapMemoryUsage", - "type":"read" - }, - "value":209274376, - "timestamp":1446129256, - "status":200 - } -]` - const invalidJSON = "I don't think this is JSON" -const empty = "" - var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"} @@ -130,10 +113,10 @@ type jolokiaClientStub struct { statusCode int } -func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) { +func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = 
c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -239,9 +222,7 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) { // Test that the proper values are ignored or collected func TestHttp404(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, - []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []Metric{UsedHeapMetric}) var acc testutil.Accumulator acc.SetDebug(true) @@ -254,9 +235,7 @@ func TestHttp404(t *testing.T) { // Test that the proper values are ignored or collected func TestHttpInvalidJson(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, - []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, []Metric{UsedHeapMetric}) var acc testutil.Accumulator acc.SetDebug(true) @@ -264,5 +243,5 @@ func TestHttpInvalidJson(t *testing.T) { assert.Error(t, err) assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "Error decoding JSON response") + assert.Contains(t, err.Error(), "error decoding JSON response") } diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index 4a7b8f4200a42..a944949dbab7e 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -179,6 +179,7 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration - [Java JVM](/plugins/inputs/jolokia2/examples/java.conf) - [JBoss](/plugins/inputs/jolokia2/examples/jboss.conf) - [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf) +- [Kafka Connect](/plugins/inputs/jolokia2/examples/kafka-connect.conf) - [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf) - [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf) - [ZooKeeper](/plugins/inputs/jolokia2/examples/zookeeper.conf) diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 90aa9c0db7fce..e3b42f660dff6 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -95,7 +95,7 @@ type jolokiaResponse struct { Status int `json:"status"` } -func NewClient(url string, config *ClientConfig) (*Client, error) { +func NewClient(address string, config *ClientConfig) (*Client, error) { tlsConfig, err := config.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -112,27 +112,28 @@ func NewClient(url string, config *ClientConfig) (*Client, error) { } return &Client{ - URL: url, + URL: address, config: config, client: client, }, nil } func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { - jrequests := makeJolokiaRequests(requests, c.config.ProxyConfig) - requestBody, err := json.Marshal(jrequests) + jRequests := makeJolokiaRequests(requests, c.config.ProxyConfig) + requestBody, err := json.Marshal(jRequests) if err != nil { return nil, err } - requestUrl, err := formatReadUrl(c.URL, c.config.Username, c.config.Password) + requestURL, err := formatReadURL(c.URL, c.config.Username, c.config.Password) if err != nil { return nil, err } - req, err := http.NewRequest("POST", requestUrl, bytes.NewBuffer(requestBody)) + req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody)) if err != nil { - return nil, fmt.Errorf("unable to create new request '%s': %s", requestUrl, err) + //err is not contained in returned error - it may contain sensitive 
data (password) which should not be logged + return nil, fmt.Errorf("unable to create new request for: '%s'", c.URL) } req.Header.Add("Content-type", "application/json") @@ -144,21 +145,21 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + return nil, fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - var jresponses []jolokiaResponse - if err = json.Unmarshal([]byte(responseBody), &jresponses); err != nil { - return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, responseBody) + var jResponses []jolokiaResponse + if err = json.Unmarshal(responseBody, &jResponses); err != nil { + return nil, fmt.Errorf("decoding JSON response: %s: %s", err, responseBody) } - return makeReadResponses(jresponses), nil + return makeReadResponses(jResponses), nil } func makeJolokiaRequests(rrequests []ReadRequest, proxyConfig *ProxyConfig) []jolokiaRequest { @@ -249,22 +250,22 @@ func makeReadResponses(jresponses []jolokiaResponse) []ReadResponse { return rresponses } -func formatReadUrl(configUrl, username, password string) (string, error) { - parsedUrl, err := url.Parse(configUrl) +func formatReadURL(configURL, username, password string) (string, error) { + parsedURL, err := url.Parse(configURL) if err != nil { return "", err } - readUrl := url.URL{ - Host: parsedUrl.Host, - Scheme: parsedUrl.Scheme, + readURL := url.URL{ + Host: parsedURL.Host, + Scheme: parsedURL.Scheme, } if username != "" || password != "" { - readUrl.User = url.UserPassword(username, password) + readURL.User = url.UserPassword(username, password) } - readUrl.Path = path.Join(parsedUrl.Path, "read") - readUrl.Query().Add("ignoreErrors", "true") - return readUrl.String(), nil + readURL.Path = path.Join(parsedURL.Path, "read") + readURL.Query().Add("ignoreErrors", "true") + return readURL.String(), nil } diff --git a/plugins/inputs/jolokia2/client_test.go b/plugins/inputs/jolokia2/client_test.go index 0c7cd4c010d50..a1bd5f4a2e141 100644 --- a/plugins/inputs/jolokia2/client_test.go +++ b/plugins/inputs/jolokia2/client_test.go @@ -3,12 +3,13 @@ package jolokia2 import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestJolokia2_ClientAuthRequest(t *testing.T) { @@ -19,11 +20,8 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) - err := json.Unmarshal(body, &requests) - if err != nil { - t.Error(err) - } + body, _ := io.ReadAll(r.Body) + require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) })) @@ -40,22 +38,14 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { `, server.URL)) var acc testutil.Accumulator - plugin.Gather(&acc) - - if username != "sally" { - t.Errorf("Expected to post with username %s, but was %s", "sally", username) - } - if password != "seashore" { - t.Errorf("Expected to post with password %s, but was %s", "seashore", password) - } - if 
len(requests) == 0 { - t.Fatal("Expected to post a request body, but was empty.") - } + require.NoError(t, plugin.Gather(&acc)) - request := requests[0] - if expect := "hello:foo=bar"; request["mbean"] != expect { - t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"]) - } + require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username) + require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password) + require.NotZero(t, len(requests), "Expected to post a request body, but was empty.") + + request := requests[0]["mbean"] + require.EqualValuesf(t, "hello:foo=bar", request, "Expected to query mbean %s, but was %s", "hello:foo=bar", request) } func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { @@ -66,13 +56,11 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) - err := json.Unmarshal(body, &requests) - if err != nil { - t.Error(err) - } - + body, _ := io.ReadAll(r.Body) + require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintf(w, "[]") + require.NoError(t, err) })) defer server.Close() @@ -93,37 +81,22 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { `, server.URL)) var acc testutil.Accumulator - plugin.Gather(&acc) - - if username != "sally" { - t.Errorf("Expected to post with username %s, but was %s", "sally", username) - } - if password != "seashore" { - t.Errorf("Expected to post with password %s, but was %s", "seashore", password) - } - if len(requests) == 0 { - t.Fatal("Expected to post a request body, but was empty.") - } + require.NoError(t, plugin.Gather(&acc)) + require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username) + require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password) + require.NotZero(t, len(requests), "Expected to post a request body, but was empty.") request := requests[0] - if expect := "hello:foo=bar"; request["mbean"] != expect { - t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"]) - } + expected := "hello:foo=bar" + require.EqualValuesf(t, expected, request["mbean"], "Expected to query mbean %s, but was %s", expected, request["mbean"]) target, ok := request["target"].(map[string]interface{}) - if !ok { - t.Fatal("Expected a proxy target, but was empty.") - } - - if expect := "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"; target["url"] != expect { - t.Errorf("Expected proxy target url %s, but was %s", expect, target["url"]) - } - - if expect := "jack"; target["user"] != expect { - t.Errorf("Expected proxy target username %s, but was %s", expect, target["user"]) - } - - if expect := "benimble"; target["password"] != expect { - t.Errorf("Expected proxy target password %s, but was %s", expect, target["password"]) - } + require.True(t, ok, "Expected a proxy target, but was empty.") + + expected = "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi" + require.Equalf(t, expected, target["url"], "Expected proxy target url %s, but was %s", expected, target["url"]) + expected = "jack" + require.Equalf(t, expected, target["user"], "Expected proxy target username %s, but was %s", expected, target["user"]) + expected = "benimble" + require.Equalf(t, expected, target["password"], 
"Expected proxy target username %s, but was %s", expected, target["password"]) } diff --git a/plugins/inputs/jolokia2/examples/kafka-connect.conf b/plugins/inputs/jolokia2/examples/kafka-connect.conf new file mode 100644 index 0000000000000..d84f5fd58df2c --- /dev/null +++ b/plugins/inputs/jolokia2/examples/kafka-connect.conf @@ -0,0 +1,90 @@ +[[inputs.jolokia2_agent]] + urls = ["http://localhost:8080/jolokia"] + name_prefix = "kafka.connect." + + [[processors.enum]] + [[processors.enum.mapping]] + field = "status" + + [processors.enum.mapping.value_mappings] + paused = 0 + running = 1 + unassigned = 2 + failed = 3 + destroyed = 4 + + [inputs.jolokia2_agent.tags] + input_type = "kafka-connect" + + # https://kafka.apache.org/documentation/#connect_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerMetrics" + mbean = "kafka.connect:type=connect-worker-metrics" + paths = ["connector-count", "connector-startup-attempts-total", "connector-startup-failure-percentage", "connector-startup-failure-total", "connector-startup-success-percentage", "connector-startup-success-total", "task-count", "task-startup-attempts-total", "task-startup-failure-percentage", "task-startup-failure-total", "task-startup-success-percentage", "task-startup-success-total"] + + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerMetrics" + mbean = "kafka.connect:type=connect-worker-metrics,connector=*" + paths = ["connector-destroyed-task-count", "connector-failed-task-count", "connector-paused-task-count", "connector-running-task-count", "connector-total-task-count", "connector-unassigned-task-count"] + tag_keys = ["connector"] + + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerRebalanceMetrics" + mbean = "kafka.connect:type=connect-worker-rebalance-metrics" + paths = ["completed-rebalances-total", "connect-protocol", "epoch", "leader-name", "rebalance-avg-time-ms", "rebalance-max-time-ms", "rebalancing", "time-since-last-rebalance-ms"] + + [[inputs.jolokia2_agent.metric]] + name = "connectorMetrics" + mbean = "kafka.connect:type=connector-metrics,connector=*" + paths = ["connector-class", "connector-version", "connector-type", "status"] + tag_keys = ["connector"] + + [[inputs.jolokia2_agent.metric]] + name = "connectorTaskMetrics" + mbean = "kafka.connect:type=connector-task-metrics,connector=*,task=*" + paths = ["batch-size-avg", "batch-size-max", "offset-commit-avg-time-ms", "offset-commit-failure-percentage", "offset-commit-max-time-ms", "offset-commit-success-percentage", "pause-ratio", "running-ratio", "status"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "sinkTaskMetrics" + mbean = "kafka.connect:type=sink-task-metrics,connector=*,task=*" + paths = ["offset-commit-completion-rate", "offset-commit-completion-total", "offset-commit-seq-no", "offset-commit-skip-rate", "offset-commit-skip-total", "partition-count", "put-batch-avg-time-ms", "put-batch-max-time-ms", "sink-record-active-count", "sink-record-active-count-avg", "sink-record-active-count-max", "sink-record-lag-max", "sink-record-read-rate", "sink-record-read-total", "sink-record-send-rate", "sink-record-send-total"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "sourceTaskMetrics" + mbean = "kafka.connect:type=source-task-metrics,connector=*,task=*" + paths = ["poll-batch-avg-time-ms", "poll-batch-max-time-ms", "source-record-active-count", "source-record-active-count-avg", "source-record-active-count-max", "source-record-poll-rate", 
"source-record-poll-total", "source-record-write-rate", "source-record-write-total"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "taskErrorMetrics" + mbean = "kafka.connect:type=task-error-metrics,connector=*,task=*" + paths = ["deadletterqueue-produce-failures", "deadletterqueue-produce-requests", "last-error-timestamp", "total-errors-logged", "total-record-errors", "total-record-failures", "total-records-skipped", "total-retries"] + tag_keys = ["connector", "task"] + + # https://kafka.apache.org/documentation/#selector_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectMetrics" + mbean = "kafka.connect:type=connect-metrics,client-id=*" + paths = ["connection-close-rate", "connection-close-total", "connection-creation-rate", "connection-creation-total", "network-io-rate", "network-io-total", "outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-rate", "incoming-byte-total", "response-rate", "response-total", "select-rate", "select-total", "io-wait-time-ns-avg", "io-wait-ratio", "io-time-ns-avg", "io-ratio", "connection-count", "successful-authentication-rate", "successful-authentication-total", "failed-authentication-rate", "failed-authentication-total", "successful-reauthentication-rate", "successful-reauthentication-total", "reauthentication-latency-max", "reauthentication-latency-avg", "failed-reauthentication-rate", "failed-reauthentication-total", "successful-authentication-no-reauth-total"] + tag_keys = ["client-id"] + + # https://kafka.apache.org/documentation/#common_node_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectNodeMetrics" + mbean = "kafka.connect:type=connect-node-metrics,client-id=*,node-id=*" + paths = ["outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-total", "request-latency-avg", "request-latency-max", "response-rate", "response-total"] + tag_keys = ["client-id", "node-id"] + + [[inputs.jolokia2_agent.metric]] + name = "appInfo" + mbean = "kafka.connect:type=app-info,client-id=*" + paths = ["start-time-ms", "commit-id", "version"] + tag_keys = ["client-id"] + + [[inputs.jolokia2_agent.metric]] + name = "connectCoordinatorMetrics" + mbean = "kafka.connect:type=connect-coordinator-metrics,client-id=*" + paths = ["join-time-max", "failed-rebalance-rate-per-hour", "rebalance-latency-total", "sync-time-avg", "join-rate", "sync-rate", "failed-rebalance-total", "rebalance-total", "last-heartbeat-seconds-ago", "heartbeat-rate", "join-time-avg", "sync-total", "rebalance-latency-max", "sync-time-max", "last-rebalance-seconds-ago", "rebalance-rate-per-hour", "assigned-connectors", "heartbeat-total", "assigned-tasks", "heartbeat-response-time-max", "rebalance-latency-avg", "join-total"] + tag_keys = ["client-id"] \ No newline at end of file diff --git a/plugins/inputs/jolokia2/examples/kafka.conf b/plugins/inputs/jolokia2/examples/kafka.conf index ae34831fc55c9..24053b5ad6fa7 100644 --- a/plugins/inputs/jolokia2/examples/kafka.conf +++ b/plugins/inputs/jolokia2/examples/kafka.conf @@ -1,6 +1,30 @@ [[inputs.jolokia2_agent]] name_prefix = "kafka_" + + ## If you intend to use "non_negative_derivative(1s)" with "*.count" fields, you don't need precalculated fields. 
+ # fielddrop = [ + # "*.EventType", + # "*.FifteenMinuteRate", + # "*.FiveMinuteRate", + # "*.MeanRate", + # "*.OneMinuteRate", + # "*.RateUnit", + # "*.LatencyUnit", + # "*.50thPercentile", + # "*.75thPercentile", + # "*.95thPercentile", + # "*.98thPercentile", + # "*.99thPercentile", + # "*.999thPercentile", + # "*.Min", + # "*.Mean", + # "*.Max", + # "*.StdDev" + # ] + + ## jolokia_agent_url tag is not needed if you have only one instance of Kafka on the server. + # tagexclude = ["jolokia_agent_url"] urls = ["http://localhost:8080/jolokia"] @@ -21,9 +45,15 @@ field_name = "$2" [[inputs.jolokia2_agent.metric]] - name = "client" - mbean = "kafka.server:client-id=*,type=*" - tag_keys = ["client-id", "type"] + name = "zookeeper" + mbean = "kafka.server:name=*,type=SessionExpireListener" + field_prefix = "$1." + + [[inputs.jolokia2_agent.metric]] + name = "user" + mbean = "kafka.server:user=*,type=Request" + field_prefix = "" + tag_keys = ["user"] [[inputs.jolokia2_agent.metric]] name = "request" @@ -53,3 +83,27 @@ mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition" field_name = "UnderReplicatedPartitions" tag_keys = ["topic", "partition"] + +## If you have multiple instances of Kafka on the server, use 'jolokia_agent_url' as identity of each instance +# [[processors.rename]] +# namepass = ["kafka_*"] +# order = 1 +# [[processors.rename.replace]] +# tag = "jolokia_agent_url" +# dest = "instance" +# +# [[processors.regex]] +# namepass = ["kafka_*"] +# order = 2 +# [[processors.regex.tags]] +# key = "instance" +# pattern = "^.+:8080/.+$" +# replacement = "0" +# [[processors.regex.tags]] +# key = "instance" +# pattern = "^.+:8081/.+$" +# replacement = "1" +# [[processors.regex.tags]] +# key = "instance" +# pattern = "^.+:8082/.+$" +# replacement = "2" diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index f24918998248e..1dfdc057e832b 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -46,7 +46,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error { // gatherResponses adds points to an accumulator from the ReadResponse objects // returned by a Jolokia agent. func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) { - series := make(map[string][]point, 0) + series := make(map[string][]point) for _, metric := range g.metrics { points, ok := series[metric.Name] @@ -55,11 +55,7 @@ func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]str } responsePoints, responseErrors := g.generatePoints(metric, responses) - - for _, responsePoint := range responsePoints { - points = append(points, responsePoint) - } - + points = append(points, responsePoints...) for _, err := range responseErrors { acc.AddError(err) } @@ -84,11 +80,11 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po for _, response := range responses { switch response.Status { case 200: - break + // Correct response status - do nothing. 
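+ // (Go switch cases do not fall through, so the 200 case needs no explicit break; the comment alone marks the intentionally empty body.)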
case 404: continue default: - errors = append(errors, fmt.Errorf("Unexpected status in response from target %s (%q): %d", + errors = append(errors, fmt.Errorf("unexpected status in response from target %s (%q): %d", response.RequestTarget, response.RequestMbean, response.Status)) continue } @@ -195,7 +191,6 @@ func tagSetsMatch(a, b map[string]string) bool { func makeReadRequests(metrics []Metric) []ReadRequest { var requests []ReadRequest for _, metric := range metrics { - if len(metric.Paths) == 0 { requests = append(requests, ReadRequest{ Mbean: metric.Mbean, diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go index 58b67ce5a1c9a..23336dd6f4351 100644 --- a/plugins/inputs/jolokia2/jolokia_agent.go +++ b/plugins/inputs/jolokia2/jolokia_agent.go @@ -3,9 +3,10 @@ package jolokia2 import ( "fmt" "sync" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -17,7 +18,7 @@ type JolokiaAgent struct { URLs []string `toml:"urls"` Username string Password string - ResponseTimeout internal.Duration `toml:"response_timeout"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig @@ -67,7 +68,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { for _, url := range ja.URLs { client, err := ja.createClient(url) if err != nil { - acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err)) + acc.AddError(fmt.Errorf("unable to create client for %s: %v", url, err)) continue } ja.clients = append(ja.clients, client) @@ -83,9 +84,8 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { err := ja.gatherer.Gather(client, acc) if err != nil { - acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err)) + acc.AddError(fmt.Errorf("unable to gather metrics for %s: %v", client.URL, err)) } - }(client) } @@ -97,8 +97,8 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { func (ja *JolokiaAgent) createMetrics() []Metric { var metrics []Metric - for _, config := range ja.Metrics { - metrics = append(metrics, NewMetric(config, + for _, metricConfig := range ja.Metrics { + metrics = append(metrics, NewMetric(metricConfig, ja.DefaultFieldPrefix, ja.DefaultFieldSeparator, ja.DefaultTagPrefix)) } @@ -109,7 +109,7 @@ func (ja *JolokiaAgent) createClient(url string) (*Client, error) { return NewClient(url, &ClientConfig{ Username: ja.Username, Password: ja.Password, - ResponseTimeout: ja.ResponseTimeout.Duration, + ResponseTimeout: time.Duration(ja.ResponseTimeout), ClientConfig: ja.ClientConfig, }) } diff --git a/plugins/inputs/jolokia2/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia_proxy.go index 6428a88515aee..8654c9308762c 100644 --- a/plugins/inputs/jolokia2/jolokia_proxy.go +++ b/plugins/inputs/jolokia2/jolokia_proxy.go @@ -1,8 +1,10 @@ package jolokia2 import ( + "time" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -18,7 +20,7 @@ type JolokiaProxy struct { Username string Password string - ResponseTimeout internal.Duration `toml:"response_timeout"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig Metrics []MetricConfig `toml:"metric"` @@ -91,8 +93,8 @@ func (jp *JolokiaProxy) Gather(acc telegraf.Accumulator) error { func (jp *JolokiaProxy) createMetrics() []Metric { var metrics 
[]Metric - for _, config := range jp.Metrics { - metrics = append(metrics, NewMetric(config, + for _, metricConfig := range jp.Metrics { + metrics = append(metrics, NewMetric(metricConfig, jp.DefaultFieldPrefix, jp.DefaultFieldSeparator, jp.DefaultTagPrefix)) } @@ -116,7 +118,7 @@ func (jp *JolokiaProxy) createClient() (*Client, error) { return NewClient(jp.URL, &ClientConfig{ Username: jp.Username, Password: jp.Password, - ResponseTimeout: jp.ResponseTimeout.Duration, + ResponseTimeout: time.Duration(jp.ResponseTimeout), ClientConfig: jp.ClientConfig, ProxyConfig: proxyConfig, }) diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index 61c410c0b2067..01750bf002ff5 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -6,11 +6,12 @@ import ( "net/http/httptest" "testing" + "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" ) func TestJolokia2_ScalarValues(t *testing.T) { @@ -74,7 +75,7 @@ func TestJolokia2_ScalarValues(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -234,7 +235,7 @@ func TestJolokia2_ObjectValues(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -322,7 +323,7 @@ func TestJolokia2_StatusCodes(t *testing.T) { "status": 500 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -372,7 +373,7 @@ func TestJolokia2_TagRenaming(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -465,7 +466,7 @@ func TestJolokia2_FieldRenaming(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -573,7 +574,7 @@ func TestJolokia2_MetricMbeanMatching(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -666,7 +667,7 @@ func TestJolokia2_MetricCompaction(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -727,7 +728,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -749,27 +750,23 @@ func TestJolokia2_ProxyTargets(t *testing.T) { } func TestFillFields(t *testing.T) { - complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} - var scalar interface{} - scalar = []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + complexPoint := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + scalarPoint := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} results := map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) + newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complexPoint, results) assert.Equal(t, map[string]interface{}{}, results) results = map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalar, results) + newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalarPoint, results) assert.Equal(t, map[string]interface{}{}, results) } -func setupServer(status int, resp string) *httptest.Server { +func setupServer(resp string) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - //body, err := ioutil.ReadAll(r.Body) - //if err == nil { - // fmt.Println(string(body)) - //} - + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, resp) })) } diff --git a/plugins/inputs/jolokia2/point_builder.go b/plugins/inputs/jolokia2/point_builder.go index f5ae1d31410ec..c1b985155b4b8 100644 --- a/plugins/inputs/jolokia2/point_builder.go +++ b/plugins/inputs/jolokia2/point_builder.go @@ -40,7 +40,6 @@ func (pb *pointBuilder) Build(mbean string, value interface{}) []point { points := make([]point, 0) for mbean, value := range valueMap { - points = append(points, point{ Tags: pb.extractTags(mbean), Fields: pb.extractFields(mbean, value), @@ -99,13 +98,11 @@ func (pb *pointBuilder) extractFields(mbean string, value interface{}) map[strin // if there were no attributes requested, // then the keys are attributes pb.fillFields("", valueMap, fieldMap) - } else if len(pb.objectAttributes) == 1 
{ // if there was a single attribute requested, // then the keys are the attribute's properties fieldName := pb.formatFieldName(pb.objectAttributes[0], pb.objectPath) pb.fillFields(fieldName, valueMap, fieldMap) - } else { // if there were multiple attributes requested, // then the keys are the attribute names @@ -199,7 +196,6 @@ func (pb *pointBuilder) applySubstitutions(mbean string, fieldMap map[string]int properties := makePropertyMap(mbean) for i, subKey := range pb.substitutions[1:] { - symbol := fmt.Sprintf("$%d", i+1) substitution := properties[subKey] diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go index 7ddeefacab635..1342758887932 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go @@ -1,182 +1,238 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: authentication_service.proto +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: auth/authentication_service.proto -/* -Package authentication is a generated protocol buffer package. - -It is generated from these files: - authentication_service.proto - -It has these top-level messages: - LoginRequest - LoginReply -*/ package authentication -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // The request message containing the user's name, password and client id type LoginRequest struct { - UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName" json:"user_name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (x *LoginRequest) Reset() { + *x = LoginRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoginRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoginRequest) ProtoMessage() {} + +func (x *LoginRequest) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LoginRequest) Reset() { *m = LoginRequest{} } -func (m *LoginRequest) String() string { return proto.CompactTextString(m) } -func (*LoginRequest) ProtoMessage() {} -func (*LoginRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. +func (*LoginRequest) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{0} +} -func (m *LoginRequest) GetUserName() string { - if m != nil { - return m.UserName +func (x *LoginRequest) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *LoginRequest) GetPassword() string { - if m != nil { - return m.Password +func (x *LoginRequest) GetPassword() string { + if x != nil { + return x.Password } return "" } -func (m *LoginRequest) GetClientId() string { - if m != nil { - return m.ClientId +func (x *LoginRequest) GetClientId() string { + if x != nil { + return x.ClientId } return "" } +// // The response message containing the result of login attempt. 
// result value of true indicates success and false indicates // failure type LoginReply struct { - Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LoginReply) Reset() { *m = LoginReply{} } -func (m *LoginReply) String() string { return proto.CompactTextString(m) } -func (*LoginReply) ProtoMessage() {} -func (*LoginReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` +} -func (m *LoginReply) GetResult() bool { - if m != nil { - return m.Result +func (x *LoginReply) Reset() { + *x = LoginReply{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func init() { - proto.RegisterType((*LoginRequest)(nil), "authentication.LoginRequest") - proto.RegisterType((*LoginReply)(nil), "authentication.LoginReply") +func (x *LoginReply) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*LoginReply) ProtoMessage() {} -// Client API for Login service - -type LoginClient interface { - LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) -} - -type loginClient struct { - cc *grpc.ClientConn +func (x *LoginReply) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func NewLoginClient(cc *grpc.ClientConn) LoginClient { - return &loginClient{cc} +// Deprecated: Use LoginReply.ProtoReflect.Descriptor instead. +func (*LoginReply) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{1} } -func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { - out := new(LoginReply) - err := grpc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *LoginReply) GetResult() bool { + if x != nil { + return x.Result } - return out, nil + return false } -// Server API for Login service +var File_auth_authentication_service_proto protoreflect.FileDescriptor + +var file_auth_authentication_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x32, + 0x51, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x48, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, + 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x42, 0x12, 0x5a, 0x10, 0x2e, 0x3b, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_auth_authentication_service_proto_rawDescOnce sync.Once + file_auth_authentication_service_proto_rawDescData = file_auth_authentication_service_proto_rawDesc +) -type LoginServer interface { - LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) +func file_auth_authentication_service_proto_rawDescGZIP() []byte { + file_auth_authentication_service_proto_rawDescOnce.Do(func() { + file_auth_authentication_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_authentication_service_proto_rawDescData) + }) + return file_auth_authentication_service_proto_rawDescData } -func RegisterLoginServer(s *grpc.Server, srv LoginServer) { - s.RegisterService(&_Login_serviceDesc, srv) +var file_auth_authentication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_auth_authentication_service_proto_goTypes = []interface{}{ + (*LoginRequest)(nil), // 0: authentication.LoginRequest + (*LoginReply)(nil), // 1: authentication.LoginReply +} +var file_auth_authentication_service_proto_depIdxs = []int32{ + 0, // 0: authentication.Login.LoginCheck:input_type -> authentication.LoginRequest + 1, // 1: authentication.Login.LoginCheck:output_type -> authentication.LoginReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // 
[0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LoginRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LoginServer).LoginCheck(ctx, in) +func init() { file_auth_authentication_service_proto_init() } +func file_auth_authentication_service_proto_init() { + if File_auth_authentication_service_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/authentication.Login/LoginCheck", + if !protoimpl.UnsafeEnabled { + file_auth_authentication_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_auth_authentication_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Login_serviceDesc = grpc.ServiceDesc{ - ServiceName: "authentication.Login", - HandlerType: (*LoginServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LoginCheck", - Handler: _Login_LoginCheck_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_auth_authentication_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "authentication_service.proto", -} - -func init() { proto.RegisterFile("authentication_service.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x2c, 0x2d, 0xc9, - 0x48, 0xcd, 0x2b, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, - 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0x95, 0x55, 0x4a, 0xe1, 0xe2, - 0xf1, 0xc9, 0x4f, 0xcf, 0xcc, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe6, 0xe2, - 0x2c, 0x2d, 0x4e, 0x2d, 0x8a, 0xcf, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, - 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, 0x0a, 0x49, 0x71, 0x71, 0x14, 0x24, 0x16, 0x17, 0x97, - 0xe7, 0x17, 0xa5, 0x48, 0x30, 0x41, 0xe4, 0x60, 0x7c, 0x90, 0xc6, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, - 0x92, 0xf8, 0xcc, 0x14, 0x09, 0x66, 0x88, 0x24, 0x44, 0xc0, 0x33, 0x45, 0x49, 0x85, 0x8b, 0x0b, - 0x6a, 0x4b, 0x41, 0x4e, 0xa5, 0x90, 0x18, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e, 0x09, 0xd8, - 0x02, 0x8e, 0x20, 0x28, 0xcf, 0x28, 0x90, 0x8b, 0x15, 0xac, 0x4a, 0xc8, 0x03, 0xaa, 0xdc, 0x39, - 0x23, 0x35, 0x39, 0x5b, 0x48, 0x46, 0x0f, 0xd5, 0xcd, 0x7a, 0xc8, 0x0e, 0x96, 0x92, 0xc2, 0x21, - 0x5b, 0x90, 0x53, 0xa9, 0xc4, 0x90, 0xc4, 0x06, 0xf6, 0xb5, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x11, 0x57, 0x52, 0xd2, 0x15, 0x01, 0x00, 0x00, + GoTypes: file_auth_authentication_service_proto_goTypes, + DependencyIndexes: 
file_auth_authentication_service_proto_depIdxs, + MessageInfos: file_auth_authentication_service_proto_msgTypes, + }.Build() + File_auth_authentication_service_proto = out.File + file_auth_authentication_service_proto_rawDesc = nil + file_auth_authentication_service_proto_goTypes = nil + file_auth_authentication_service_proto_depIdxs = nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto index a41e13a09f7d9..f67b67a6c5730 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto @@ -25,6 +25,7 @@ syntax = "proto3"; package authentication; +option go_package = ".;authentication"; // The Login service definition. service Login { diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go new file mode 100644 index 0000000000000..bbbf200ec68be --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package authentication + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LoginClient is the client API for Login service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoginClient interface { + LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) +} + +type loginClient struct { + cc grpc.ClientConnInterface +} + +func NewLoginClient(cc grpc.ClientConnInterface) LoginClient { + return &loginClient{cc} +} + +func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { + out := new(LoginReply) + err := c.cc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LoginServer is the server API for Login service. +// All implementations must embed UnimplementedLoginServer +// for forward compatibility +type LoginServer interface { + LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) + mustEmbedUnimplementedLoginServer() +} + +// UnimplementedLoginServer must be embedded to have forward compatible implementations. +type UnimplementedLoginServer struct { +} + +func (UnimplementedLoginServer) LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoginCheck not implemented") +} +func (UnimplementedLoginServer) mustEmbedUnimplementedLoginServer() {} + +// UnsafeLoginServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoginServer will +// result in compilation errors. 
+type UnsafeLoginServer interface { + mustEmbedUnimplementedLoginServer() +} + +func RegisterLoginServer(s grpc.ServiceRegistrar, srv LoginServer) { + s.RegisterService(&Login_ServiceDesc, srv) +} + +func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LoginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoginServer).LoginCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/authentication.Login/LoginCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Login_ServiceDesc is the grpc.ServiceDesc for Login service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Login_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "authentication.Login", + HandlerType: (*LoginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LoginCheck", + Handler: _Login_LoginCheck_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth/authentication_service.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/collection.go b/plugins/inputs/jti_openconfig_telemetry/collection.go index ffd9019f5f317..d1bad8b30c739 100644 --- a/plugins/inputs/jti_openconfig_telemetry/collection.go +++ b/plugins/inputs/jti_openconfig_telemetry/collection.go @@ -17,7 +17,7 @@ func (a CollectionByKeys) Less(i, j int) bool { return a[i].numKeys < a[j].numKe // Checks to see if there is already a group with these tags and returns its index. Returns -1 if unavailable. func (a CollectionByKeys) IsAvailable(tags map[string]string) *DataGroup { - sort.Sort(CollectionByKeys(a)) + sort.Sort(a) // Iterate through all the groups and see if we have group with these tags for _, group := range a { diff --git a/plugins/inputs/jti_openconfig_telemetry/gen.go b/plugins/inputs/jti_openconfig_telemetry/gen.go new file mode 100644 index 0000000000000..0b97e3bea9e55 --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/gen.go @@ -0,0 +1,11 @@ +package jti_openconfig_telemetry + +// To run these commands, make sure that protoc-gen-go and protoc-gen-go-grpc are installed +// > go install google.golang.org/protobuf/cmd/protoc-gen-go +// > go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +// +// Generated files were last generated with: +// - protoc-gen-go: v1.27.1 +// - protoc-gen-go-grpc: v1.1.0 +//go:generate protoc --go_out=auth/ --go-grpc_out=auth/ auth/authentication_service.proto +//go:generate protoc --go_out=oc/ --go-grpc_out=oc/ oc/oc.proto diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index bc7c780458f99..19d16dccc501a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -1,54 +1,24 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: oc.proto - -/* -Package telemetry is a generated protocol buffer package. 
- -It is generated from these files: - oc.proto - -It has these top-level messages: - SubscriptionRequest - SubscriptionInput - Collector - Path - SubscriptionAdditionalConfig - SubscriptionReply - SubscriptionResponse - OpenConfigData - KeyValue - Delete - Eom - CancelSubscriptionRequest - CancelSubscriptionReply - GetSubscriptionsRequest - GetSubscriptionsReply - GetOperationalStateRequest - GetOperationalStateReply - DataEncodingRequest - DataEncodingReply -*/ -package telemetry +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: oc/oc.proto -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package telemetry import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Result of the operation type ReturnCode int32 @@ -59,21 +29,46 @@ const ( ReturnCode_UNKNOWN_ERROR ReturnCode = 2 ) -var ReturnCode_name = map[int32]string{ - 0: "SUCCESS", - 1: "NO_SUBSCRIPTION_ENTRY", - 2: "UNKNOWN_ERROR", -} -var ReturnCode_value = map[string]int32{ - "SUCCESS": 0, - "NO_SUBSCRIPTION_ENTRY": 1, - "UNKNOWN_ERROR": 2, +// Enum value maps for ReturnCode. +var ( + ReturnCode_name = map[int32]string{ + 0: "SUCCESS", + 1: "NO_SUBSCRIPTION_ENTRY", + 2: "UNKNOWN_ERROR", + } + ReturnCode_value = map[string]int32{ + "SUCCESS": 0, + "NO_SUBSCRIPTION_ENTRY": 1, + "UNKNOWN_ERROR": 2, + } +) + +func (x ReturnCode) Enum() *ReturnCode { + p := new(ReturnCode) + *p = x + return p } func (x ReturnCode) String() string { - return proto.EnumName(ReturnCode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReturnCode) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[0].Descriptor() +} + +func (ReturnCode) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[0] +} + +func (x ReturnCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReturnCode.Descriptor instead. +func (ReturnCode) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} } -func (ReturnCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Verbosity Level type VerbosityLevel int32 @@ -84,21 +79,46 @@ const ( VerbosityLevel_BRIEF VerbosityLevel = 2 ) -var VerbosityLevel_name = map[int32]string{ - 0: "DETAIL", - 1: "TERSE", - 2: "BRIEF", -} -var VerbosityLevel_value = map[string]int32{ - "DETAIL": 0, - "TERSE": 1, - "BRIEF": 2, +// Enum value maps for VerbosityLevel. 
+var ( + VerbosityLevel_name = map[int32]string{ + 0: "DETAIL", + 1: "TERSE", + 2: "BRIEF", + } + VerbosityLevel_value = map[string]int32{ + "DETAIL": 0, + "TERSE": 1, + "BRIEF": 2, + } +) + +func (x VerbosityLevel) Enum() *VerbosityLevel { + p := new(VerbosityLevel) + *p = x + return p } func (x VerbosityLevel) String() string { - return proto.EnumName(VerbosityLevel_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VerbosityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[1].Descriptor() +} + +func (VerbosityLevel) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[1] +} + +func (x VerbosityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VerbosityLevel.Descriptor instead. +func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} } -func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // Encoding Type Supported type EncodingType int32 @@ -110,126 +130,248 @@ const ( EncodingType_PROTO3 EncodingType = 3 ) -var EncodingType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "XML", - 2: "JSON_IETF", - 3: "PROTO3", -} -var EncodingType_value = map[string]int32{ - "UNDEFINED": 0, - "XML": 1, - "JSON_IETF": 2, - "PROTO3": 3, +// Enum value maps for EncodingType. +var ( + EncodingType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "XML", + 2: "JSON_IETF", + 3: "PROTO3", + } + EncodingType_value = map[string]int32{ + "UNDEFINED": 0, + "XML": 1, + "JSON_IETF": 2, + "PROTO3": 3, + } +) + +func (x EncodingType) Enum() *EncodingType { + p := new(EncodingType) + *p = x + return p } func (x EncodingType) String() string { - return proto.EnumName(EncodingType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EncodingType) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[2].Descriptor() +} + +func (EncodingType) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[2] +} + +func (x EncodingType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EncodingType.Descriptor instead. +func (EncodingType) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} } -func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // Message sent for a telemetry subscription request type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data associated with a telemetry subscription - Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. - PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` // The below configuration is not defined in Openconfig RPC. // It is a proposed extension to configure additional // subscription request features. 
- AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig" json:"additional_config,omitempty"` + AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"` } -func (m *SubscriptionRequest) Reset() { *m = SubscriptionRequest{} } -func (m *SubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*SubscriptionRequest) ProtoMessage() {} -func (*SubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionRequest) GetInput() *SubscriptionInput { - if m != nil { - return m.Input +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriptionRequest) GetInput() *SubscriptionInput { + if x != nil { + return x.Input } return nil } -func (m *SubscriptionRequest) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionRequest) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } -func (m *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { - if m != nil { - return m.AdditionalConfig +func (x *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { + if x != nil { + return x.AdditionalConfig } return nil } // Data associated with a telemetry subscription type SubscriptionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of optional collector endpoints to send data for // this subscription. // If no collector destinations are specified, the collector // destination is assumed to be the requester on the rpc channel. 
- CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList" json:"collector_list,omitempty"` + CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList,proto3" json:"collector_list,omitempty"` +} + +func (x *SubscriptionInput) Reset() { + *x = SubscriptionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SubscriptionInput) Reset() { *m = SubscriptionInput{} } -func (m *SubscriptionInput) String() string { return proto.CompactTextString(m) } -func (*SubscriptionInput) ProtoMessage() {} -func (*SubscriptionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *SubscriptionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *SubscriptionInput) GetCollectorList() []*Collector { - if m != nil { - return m.CollectorList +func (*SubscriptionInput) ProtoMessage() {} + +func (x *SubscriptionInput) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionInput.ProtoReflect.Descriptor instead. +func (*SubscriptionInput) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} +} + +func (x *SubscriptionInput) GetCollectorList() []*Collector { + if x != nil { + return x.CollectorList } return nil } // Collector endpoints to send data specified as an ip+port combination. type Collector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // IP address of collector endpoint - Address string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Transport protocol port number for the collector destination. - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } -func (m *Collector) Reset() { *m = Collector{} } -func (m *Collector) String() string { return proto.CompactTextString(m) } -func (*Collector) ProtoMessage() {} -func (*Collector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *Collector) Reset() { + *x = Collector{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Collector) GetAddress() string { - if m != nil { - return m.Address +func (x *Collector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collector) ProtoMessage() {} + +func (x *Collector) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collector.ProtoReflect.Descriptor instead. 
+func (*Collector) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} +} + +func (x *Collector) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Collector) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Collector) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } // Data model path type Path struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data model path of interest // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Regular expression to be used in filtering state leaves - Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // If this is set to true, the target device will only send // updates to the collector upon a change in data value - SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged" json:"suppress_unchanged,omitempty"` + SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged,proto3" json:"suppress_unchanged,omitempty"` // Maximum time in ms the target device may go without sending // a message to the collector. If this time expires with // suppress-unchanged set, the target device must send an update // message regardless if the data values have changed. - MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval" json:"max_silent_interval,omitempty"` + MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval,proto3" json:"max_silent_interval,omitempty"` // Time in ms between collection and transmission of the // specified data to the collector platform. The target device // will sample the corresponding data (e.g,. a counter) and @@ -237,143 +379,263 @@ type Path struct { // // If sample-frequency is set to 0, then the network device // must emit an update upon every datum change. - SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency" json:"sample_frequency,omitempty"` + SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency,proto3" json:"sample_frequency,omitempty"` // EOM needed for each walk cycle of this path? // For periodic sensor, applicable for each complete reap // For event sensor, applicable when initial dump is over // (same as EOS) // This feature is not implemented currently. 
- NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom" json:"need_eom,omitempty"` + NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom,proto3" json:"need_eom,omitempty"` } -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Path) GetPath() string { - if m != nil { - return m.Path +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. +func (*Path) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{3} +} + +func (x *Path) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *Path) GetFilter() string { - if m != nil { - return m.Filter +func (x *Path) GetFilter() string { + if x != nil { + return x.Filter } return "" } -func (m *Path) GetSuppressUnchanged() bool { - if m != nil { - return m.SuppressUnchanged +func (x *Path) GetSuppressUnchanged() bool { + if x != nil { + return x.SuppressUnchanged } return false } -func (m *Path) GetMaxSilentInterval() uint32 { - if m != nil { - return m.MaxSilentInterval +func (x *Path) GetMaxSilentInterval() uint32 { + if x != nil { + return x.MaxSilentInterval } return 0 } -func (m *Path) GetSampleFrequency() uint32 { - if m != nil { - return m.SampleFrequency +func (x *Path) GetSampleFrequency() uint32 { + if x != nil { + return x.SampleFrequency } return 0 } -func (m *Path) GetNeedEom() bool { - if m != nil { - return m.NeedEom +func (x *Path) GetNeedEom() bool { + if x != nil { + return x.NeedEom } return false } // Configure subscription request additional features. type SubscriptionAdditionalConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // limit the number of records sent in the stream - LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords" json:"limit_records,omitempty"` + LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords,proto3" json:"limit_records,omitempty"` // limit the time the stream remains open - LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds" json:"limit_time_seconds,omitempty"` + LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds,proto3" json:"limit_time_seconds,omitempty"` // EOS needed for this subscription? 
- NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos" json:"need_eos,omitempty"` + NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos,proto3" json:"need_eos,omitempty"` +} + +func (x *SubscriptionAdditionalConfig) Reset() { + *x = SubscriptionAdditionalConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionAdditionalConfig) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SubscriptionAdditionalConfig) Reset() { *m = SubscriptionAdditionalConfig{} } -func (m *SubscriptionAdditionalConfig) String() string { return proto.CompactTextString(m) } -func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (m *SubscriptionAdditionalConfig) GetLimitRecords() int32 { - if m != nil { - return m.LimitRecords +func (x *SubscriptionAdditionalConfig) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionAdditionalConfig.ProtoReflect.Descriptor instead. +func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{4} +} + +func (x *SubscriptionAdditionalConfig) GetLimitRecords() int32 { + if x != nil { + return x.LimitRecords } return 0 } -func (m *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { - if m != nil { - return m.LimitTimeSeconds +func (x *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { + if x != nil { + return x.LimitTimeSeconds } return 0 } -func (m *SubscriptionAdditionalConfig) GetNeedEos() bool { - if m != nil { - return m.NeedEos +func (x *SubscriptionAdditionalConfig) GetNeedEos() bool { + if x != nil { + return x.NeedEos } return false } // 1. Reply data message sent out using out-of-band channel. type SubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Response message to a telemetry subscription creation or // get request. - Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. 
- PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` } -func (m *SubscriptionReply) Reset() { *m = SubscriptionReply{} } -func (m *SubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*SubscriptionReply) ProtoMessage() {} -func (*SubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *SubscriptionReply) Reset() { + *x = SubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionReply) GetResponse() *SubscriptionResponse { - if m != nil { - return m.Response +func (x *SubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionReply) ProtoMessage() {} + +func (x *SubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionReply.ProtoReflect.Descriptor instead. +func (*SubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{5} +} + +func (x *SubscriptionReply) GetResponse() *SubscriptionResponse { + if x != nil { + return x.Response } return nil } -func (m *SubscriptionReply) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionReply) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } // Response message to a telemetry subscription creation or get request. type SubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Unique id for the subscription on the device. 
This is // generated by the device and returned in a subscription // request or when listing existing subscriptions - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *SubscriptionResponse) Reset() { *m = SubscriptionResponse{} } -func (m *SubscriptionResponse) String() string { return proto.CompactTextString(m) } -func (*SubscriptionResponse) ProtoMessage() {} -func (*SubscriptionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *SubscriptionResponse) Reset() { + *x = SubscriptionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionResponse) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *SubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionResponse) ProtoMessage() {} + +func (x *SubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionResponse.ProtoReflect.Descriptor instead. +func (*SubscriptionResponse) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{6} +} + +func (x *SubscriptionResponse) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } @@ -381,112 +643,147 @@ func (m *SubscriptionResponse) GetSubscriptionId() uint32 { // 2. Telemetry data send back on the same connection as the // subscription request. type OpenConfigData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // router name:export IP address - SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId" json:"system_id,omitempty"` + SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id,omitempty"` // line card / RE (slot number) - ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId" json:"component_id,omitempty"` + ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // PFE (if applicable) - SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId" json:"sub_component_id,omitempty"` + SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId,proto3" json:"sub_component_id,omitempty"` // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,4,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Sequence number, monotonically increasing for each // system_id, component_id, sub_component_id + path. 
- SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` // timestamp (milliseconds since epoch) - Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` + Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // List of key-value pairs - Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv,proto3" json:"kv,omitempty"` // For delete. If filled, it indicates delete - Delete []*Delete `protobuf:"bytes,8,rep,name=delete" json:"delete,omitempty"` + Delete []*Delete `protobuf:"bytes,8,rep,name=delete,proto3" json:"delete,omitempty"` // If filled, it indicates end of marker for the // respective path in the list. - Eom []*Eom `protobuf:"bytes,9,rep,name=eom" json:"eom,omitempty"` + Eom []*Eom `protobuf:"bytes,9,rep,name=eom,proto3" json:"eom,omitempty"` // If filled, it indicates end of sync for complete subscription - SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse" json:"sync_response,omitempty"` + SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse,proto3" json:"sync_response,omitempty"` } -func (m *OpenConfigData) Reset() { *m = OpenConfigData{} } -func (m *OpenConfigData) String() string { return proto.CompactTextString(m) } -func (*OpenConfigData) ProtoMessage() {} -func (*OpenConfigData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *OpenConfigData) Reset() { + *x = OpenConfigData{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *OpenConfigData) GetSystemId() string { - if m != nil { - return m.SystemId +func (x *OpenConfigData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenConfigData) ProtoMessage() {} + +func (x *OpenConfigData) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenConfigData.ProtoReflect.Descriptor instead. 
+func (*OpenConfigData) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{7} +} + +func (x *OpenConfigData) GetSystemId() string { + if x != nil { + return x.SystemId } return "" } -func (m *OpenConfigData) GetComponentId() uint32 { - if m != nil { - return m.ComponentId +func (x *OpenConfigData) GetComponentId() uint32 { + if x != nil { + return x.ComponentId } return 0 } -func (m *OpenConfigData) GetSubComponentId() uint32 { - if m != nil { - return m.SubComponentId +func (x *OpenConfigData) GetSubComponentId() uint32 { + if x != nil { + return x.SubComponentId } return 0 } -func (m *OpenConfigData) GetPath() string { - if m != nil { - return m.Path +func (x *OpenConfigData) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *OpenConfigData) GetSequenceNumber() uint64 { - if m != nil { - return m.SequenceNumber +func (x *OpenConfigData) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber } return 0 } -func (m *OpenConfigData) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp +func (x *OpenConfigData) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp } return 0 } -func (m *OpenConfigData) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *OpenConfigData) GetKv() []*KeyValue { + if x != nil { + return x.Kv } return nil } -func (m *OpenConfigData) GetDelete() []*Delete { - if m != nil { - return m.Delete +func (x *OpenConfigData) GetDelete() []*Delete { + if x != nil { + return x.Delete } return nil } -func (m *OpenConfigData) GetEom() []*Eom { - if m != nil { - return m.Eom +func (x *OpenConfigData) GetEom() []*Eom { + if x != nil { + return x.Eom } return nil } -func (m *OpenConfigData) GetSyncResponse() bool { - if m != nil { - return m.SyncResponse +func (x *OpenConfigData) GetSyncResponse() bool { + if x != nil { + return x.SyncResponse } return false } // Simple Key-value, where value could be one of scalar types type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Key - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // One of possible values // - // Types that are valid to be assigned to Value: + // Types that are assignable to Value: // *KeyValue_DoubleValue // *KeyValue_IntValue // *KeyValue_UintValue @@ -497,44 +794,44 @@ type KeyValue struct { Value isKeyValue_Value `protobuf_oneof:"value"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -type isKeyValue_Value interface { - isKeyValue_Value() +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -type KeyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,oneof"` +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) } -type KeyValue_IntValue struct { - IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,oneof"` -} -type KeyValue_UintValue struct { - UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,oneof"` -} -type KeyValue_SintValue struct { - SintValue int64 
`protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,oneof"` -} -type KeyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,oneof"` -} -type KeyValue_StrValue struct { - StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,oneof"` + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type KeyValue_BytesValue struct { - BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{8} } -func (*KeyValue_DoubleValue) isKeyValue_Value() {} -func (*KeyValue_IntValue) isKeyValue_Value() {} -func (*KeyValue_UintValue) isKeyValue_Value() {} -func (*KeyValue_SintValue) isKeyValue_Value() {} -func (*KeyValue_BoolValue) isKeyValue_Value() {} -func (*KeyValue_StrValue) isKeyValue_Value() {} -func (*KeyValue_BytesValue) isKeyValue_Value() {} +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} func (m *KeyValue) GetValue() isKeyValue_Value { if m != nil { @@ -543,323 +840,412 @@ func (m *KeyValue) GetValue() isKeyValue_Value { return nil } -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*KeyValue_DoubleValue); ok { +func (x *KeyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*KeyValue_DoubleValue); ok { return x.DoubleValue } return 0 } -func (m *KeyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*KeyValue_IntValue); ok { +func (x *KeyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*KeyValue_IntValue); ok { return x.IntValue } return 0 } -func (m *KeyValue) GetUintValue() uint64 { - if x, ok := m.GetValue().(*KeyValue_UintValue); ok { +func (x *KeyValue) GetUintValue() uint64 { + if x, ok := x.GetValue().(*KeyValue_UintValue); ok { return x.UintValue } return 0 } -func (m *KeyValue) GetSintValue() int64 { - if x, ok := m.GetValue().(*KeyValue_SintValue); ok { +func (x *KeyValue) GetSintValue() int64 { + if x, ok := x.GetValue().(*KeyValue_SintValue); ok { return x.SintValue } return 0 } -func (m *KeyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*KeyValue_BoolValue); ok { +func (x *KeyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*KeyValue_BoolValue); ok { return x.BoolValue } return false } -func (m *KeyValue) GetStrValue() string { - if x, ok := m.GetValue().(*KeyValue_StrValue); ok { +func (x *KeyValue) GetStrValue() string { + if x, ok := x.GetValue().(*KeyValue_StrValue); ok { return x.StrValue } return "" } -func (m *KeyValue) GetBytesValue() []byte { - if x, ok := m.GetValue().(*KeyValue_BytesValue); ok { +func (x *KeyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*KeyValue_BytesValue); ok { return x.BytesValue } return nil } -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*KeyValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _KeyValue_OneofMarshaler, _KeyValue_OneofUnmarshaler, _KeyValue_OneofSizer, []interface{}{ - (*KeyValue_DoubleValue)(nil), - (*KeyValue_IntValue)(nil), - (*KeyValue_UintValue)(nil), - (*KeyValue_SintValue)(nil), - (*KeyValue_BoolValue)(nil), - (*KeyValue_StrValue)(nil), - (*KeyValue_BytesValue)(nil), - } +type isKeyValue_Value interface { + isKeyValue_Value() } -func _KeyValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case *KeyValue_IntValue: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - b.EncodeVarint(8<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.SintValue)) - case *KeyValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *KeyValue_StrValue: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StrValue) - case *KeyValue_BytesValue: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.BytesValue) - case nil: - default: - return fmt.Errorf("KeyValue.Value has unexpected type %T", x) - } - return nil +type KeyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` } -func _KeyValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*KeyValue) - switch tag { - case 5: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &KeyValue_DoubleValue{math.Float64frombits(x)} - return true, err - case 6: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_IntValue{int64(x)} - return true, err - case 7: // value.uint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_UintValue{x} - return true, err - case 8: // value.sint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Value = &KeyValue_SintValue{int64(x)} - return true, err - case 9: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_BoolValue{x != 0} - return true, err - case 10: // value.str_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &KeyValue_StrValue{x} - return true, err - case 11: // value.bytes_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Value = &KeyValue_BytesValue{x} - return true, err - default: - return false, nil - } -} - -func _KeyValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case 
*KeyValue_IntValue: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.SintValue<<1) ^ uint64((int64(x.SintValue) >> 63)))) - case *KeyValue_BoolValue: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += 1 - case *KeyValue_StrValue: - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StrValue))) - n += len(x.StrValue) - case *KeyValue_BytesValue: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.BytesValue))) - n += len(x.BytesValue) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type KeyValue_IntValue struct { + IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,proto3,oneof"` } +type KeyValue_UintValue struct { + UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,proto3,oneof"` +} + +type KeyValue_SintValue struct { + SintValue int64 `protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,proto3,oneof"` +} + +type KeyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type KeyValue_StrValue struct { + StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,proto3,oneof"` +} + +type KeyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*KeyValue_DoubleValue) isKeyValue_Value() {} + +func (*KeyValue_IntValue) isKeyValue_Value() {} + +func (*KeyValue_UintValue) isKeyValue_Value() {} + +func (*KeyValue_SintValue) isKeyValue_Value() {} + +func (*KeyValue_BoolValue) isKeyValue_Value() {} + +func (*KeyValue_StrValue) isKeyValue_Value() {} + +func (*KeyValue_BytesValue) isKeyValue_Value() {} + // Message indicating delete for a particular path type Delete struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Delete) Reset() { + *x = Delete{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Delete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Delete) Reset() { *m = Delete{} } -func (m *Delete) String() string { return proto.CompactTextString(m) } -func (*Delete) ProtoMessage() {} -func (*Delete) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*Delete) ProtoMessage() {} -func (m *Delete) GetPath() string { - if m != nil { - return m.Path +func (x *Delete) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Delete.ProtoReflect.Descriptor instead. 
+func (*Delete) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{9} +} + +func (x *Delete) GetPath() string { + if x != nil { + return x.Path } return "" } // Message indicating EOM for a particular path type Eom struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (m *Eom) Reset() { *m = Eom{} } -func (m *Eom) String() string { return proto.CompactTextString(m) } -func (*Eom) ProtoMessage() {} -func (*Eom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *Eom) Reset() { + *x = Eom{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Eom) GetPath() string { - if m != nil { - return m.Path +func (x *Eom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Eom) ProtoMessage() {} + +func (x *Eom) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Eom.ProtoReflect.Descriptor instead. +func (*Eom) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{10} +} + +func (x *Eom) GetPath() string { + if x != nil { + return x.Path } return "" } // Message sent for a telemetry subscription cancellation request type CancelSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *CancelSubscriptionRequest) Reset() { *m = CancelSubscriptionRequest{} } -func (m *CancelSubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionRequest) ProtoMessage() {} -func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (x *CancelSubscriptionRequest) Reset() { + *x = CancelSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *CancelSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionRequest) ProtoMessage() {} + +func (x *CancelSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionRequest.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{11} +} + +func (x *CancelSubscriptionRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription cancellation request type CancelSubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Return code - Code ReturnCode `protobuf:"varint,1,opt,name=code,enum=telemetry.ReturnCode" json:"code,omitempty"` + Code ReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=telemetry.ReturnCode" json:"code,omitempty"` // Return code string - CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr" json:"code_str,omitempty"` + CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr,proto3" json:"code_str,omitempty"` } -func (m *CancelSubscriptionReply) Reset() { *m = CancelSubscriptionReply{} } -func (m *CancelSubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionReply) ProtoMessage() {} -func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *CancelSubscriptionReply) Reset() { + *x = CancelSubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionReply) GetCode() ReturnCode { - if m != nil { - return m.Code +func (x *CancelSubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionReply) ProtoMessage() {} + +func (x *CancelSubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionReply.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{12} +} + +func (x *CancelSubscriptionReply) GetCode() ReturnCode { + if x != nil { + return x.Code } return ReturnCode_SUCCESS } -func (m *CancelSubscriptionReply) GetCodeStr() string { - if m != nil { - return m.CodeStr +func (x *CancelSubscriptionReply) GetCodeStr() string { + if x != nil { + return x.CodeStr } return "" } // Message sent for a telemetry get request type GetSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested // --- or --- // 0xFFFFFFFF for all subscription identifiers - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *GetSubscriptionsRequest) Reset() { *m = GetSubscriptionsRequest{} } -func (m *GetSubscriptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsRequest) ProtoMessage() {} -func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *GetSubscriptionsRequest) Reset() { + *x = GetSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsRequest) ProtoMessage() {} + +func (x *GetSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsRequest.ProtoReflect.Descriptor instead. 
+func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{13} +} + +func (x *GetSubscriptionsRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription get request type GetSubscriptionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of current telemetry subscriptions - SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList" json:"subscription_list,omitempty"` + SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList,proto3" json:"subscription_list,omitempty"` } -func (m *GetSubscriptionsReply) Reset() { *m = GetSubscriptionsReply{} } -func (m *GetSubscriptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsReply) ProtoMessage() {} -func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *GetSubscriptionsReply) Reset() { + *x = GetSubscriptionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { - if m != nil { - return m.SubscriptionList +func (x *GetSubscriptionsReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsReply) ProtoMessage() {} + +func (x *GetSubscriptionsReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsReply.ProtoReflect.Descriptor instead. +func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{14} +} + +func (x *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { + if x != nil { + return x.SubscriptionList } return nil } // Message sent for telemetry agent operational states request type GetOperationalStateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Per-subscription_id level operational state can be requested. 
// // Subscription identifier as returned by the device when @@ -870,434 +1256,718 @@ type GetOperationalStateRequest struct { // --- or --- // If subscription_id is not present then sent only agent-level // operational stats - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` // Control verbosity of the output - Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` + Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,proto3,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` } -func (m *GetOperationalStateRequest) Reset() { *m = GetOperationalStateRequest{} } -func (m *GetOperationalStateRequest) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateRequest) ProtoMessage() {} -func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *GetOperationalStateRequest) Reset() { + *x = GetOperationalStateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetOperationalStateRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetOperationalStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationalStateRequest) ProtoMessage() {} + +func (x *GetOperationalStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationalStateRequest.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{15} +} + +func (x *GetOperationalStateRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } -func (m *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { - if m != nil { - return m.Verbosity +func (x *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { + if x != nil { + return x.Verbosity } return VerbosityLevel_DETAIL } // Reply to telemetry agent operational states request type GetOperationalStateReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of key-value pairs where // key = operational state definition // value = operational state value - Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` } -func (m *GetOperationalStateReply) Reset() { *m = GetOperationalStateReply{} } -func (m *GetOperationalStateReply) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateReply) ProtoMessage() {} -func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *GetOperationalStateReply) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *GetOperationalStateReply) Reset() { + *x = GetOperationalStateReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -// Message sent for a data encoding request -type DataEncodingRequest struct { -} - -func (m *DataEncodingRequest) Reset() { *m = DataEncodingRequest{} } -func (m *DataEncodingRequest) String() string { return proto.CompactTextString(m) } -func (*DataEncodingRequest) ProtoMessage() {} -func (*DataEncodingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -// Reply to data encodings supported request -type DataEncodingReply struct { - EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` +func (x *GetOperationalStateReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataEncodingReply) Reset() { *m = DataEncodingReply{} } -func (m *DataEncodingReply) String() string { return proto.CompactTextString(m) } -func (*DataEncodingReply) ProtoMessage() {} -func (*DataEncodingReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*GetOperationalStateReply) ProtoMessage() {} -func (m *DataEncodingReply) GetEncodingList() []EncodingType { - if m != nil { - return m.EncodingList +func (x *GetOperationalStateReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil -} - -func init() { - proto.RegisterType((*SubscriptionRequest)(nil), "telemetry.SubscriptionRequest") - proto.RegisterType((*SubscriptionInput)(nil), "telemetry.SubscriptionInput") - proto.RegisterType((*Collector)(nil), "telemetry.Collector") - proto.RegisterType((*Path)(nil), "telemetry.Path") - proto.RegisterType((*SubscriptionAdditionalConfig)(nil), "telemetry.SubscriptionAdditionalConfig") - proto.RegisterType((*SubscriptionReply)(nil), "telemetry.SubscriptionReply") 
- proto.RegisterType((*SubscriptionResponse)(nil), "telemetry.SubscriptionResponse") - proto.RegisterType((*OpenConfigData)(nil), "telemetry.OpenConfigData") - proto.RegisterType((*KeyValue)(nil), "telemetry.KeyValue") - proto.RegisterType((*Delete)(nil), "telemetry.Delete") - proto.RegisterType((*Eom)(nil), "telemetry.Eom") - proto.RegisterType((*CancelSubscriptionRequest)(nil), "telemetry.CancelSubscriptionRequest") - proto.RegisterType((*CancelSubscriptionReply)(nil), "telemetry.CancelSubscriptionReply") - proto.RegisterType((*GetSubscriptionsRequest)(nil), "telemetry.GetSubscriptionsRequest") - proto.RegisterType((*GetSubscriptionsReply)(nil), "telemetry.GetSubscriptionsReply") - proto.RegisterType((*GetOperationalStateRequest)(nil), "telemetry.GetOperationalStateRequest") - proto.RegisterType((*GetOperationalStateReply)(nil), "telemetry.GetOperationalStateReply") - proto.RegisterType((*DataEncodingRequest)(nil), "telemetry.DataEncodingRequest") - proto.RegisterType((*DataEncodingReply)(nil), "telemetry.DataEncodingReply") - proto.RegisterEnum("telemetry.ReturnCode", ReturnCode_name, ReturnCode_value) - proto.RegisterEnum("telemetry.VerbosityLevel", VerbosityLevel_name, VerbosityLevel_value) - proto.RegisterEnum("telemetry.EncodingType", EncodingType_name, EncodingType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for OpenConfigTelemetry service - -type OpenConfigTelemetryClient interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) + return mi.MessageOf(x) } -type openConfigTelemetryClient struct { - cc *grpc.ClientConn +// Deprecated: Use GetOperationalStateReply.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{16} } -func NewOpenConfigTelemetryClient(cc *grpc.ClientConn) OpenConfigTelemetryClient { - return &openConfigTelemetryClient{cc} -} - -func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { - stream, err := grpc.NewClientStream(ctx, &_OpenConfigTelemetry_serviceDesc.Streams[0], c.cc, "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) - if err != nil { - return nil, err - } - x := &openConfigTelemetryTelemetrySubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err +func (x *GetOperationalStateReply) GetKv() []*KeyValue { + if x != nil { + return x.Kv } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenConfigTelemetry_TelemetrySubscribeClient interface { - Recv() (*OpenConfigData, error) - grpc.ClientStream + return nil } -type openConfigTelemetryTelemetrySubscribeClient struct { - grpc.ClientStream +// Message sent for a data encoding request +type DataEncodingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { - m := new(OpenConfigData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *DataEncodingRequest) Reset() { + *x = DataEncodingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { - out := new(CancelSubscriptionReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DataEncodingRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { - out := new(GetSubscriptionsReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*DataEncodingRequest) ProtoMessage() {} -func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { - out := new(GetOperationalStateReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *DataEncodingRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { - out := new(DataEncodingReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use DataEncodingRequest.ProtoReflect.Descriptor instead. +func (*DataEncodingRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{17} } -// Server API for OpenConfigTelemetry service - -type OpenConfigTelemetryServer interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) -} +// Reply to data encodings supported request +type DataEncodingReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterOpenConfigTelemetryServer(s *grpc.Server, srv OpenConfigTelemetryServer) { - s.RegisterService(&_OpenConfigTelemetry_serviceDesc, srv) + EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,proto3,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` } -func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscriptionRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *DataEncodingReply) Reset() { + *x = DataEncodingReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) } -type OpenConfigTelemetry_TelemetrySubscribeServer interface { - Send(*OpenConfigData) error - grpc.ServerStream +func (x *DataEncodingReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type openConfigTelemetryTelemetrySubscribeServer struct { - grpc.ServerStream -} +func (*DataEncodingReply) ProtoMessage() {} -func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { - return x.ServerStream.SendMsg(m) -} - 
-func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/CancelTelemetrySubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) +func (x *DataEncodingReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetrySubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DataEncodingReply.ProtoReflect.Descriptor instead. 
+func (*DataEncodingReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{18} } -func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOperationalStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) +func (x *DataEncodingReply) GetEncodingList() []EncodingType { + if x != nil { + return x.EncodingList } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetryOperationalState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DataEncodingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetDataEncodings", +var File_oc_oc_proto protoreflect.FileDescriptor + +var file_oc_oc_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x6f, 0x63, 0x2f, 0x6f, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xcd, 0x01, 0x0a, 0x13, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x54, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x50, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, + 0x0e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x39, 0x0a, 0x09, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x75, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x55, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x69, 0x6c, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x6c, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x6d, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x6d, 0x22, + 0x8c, 0x01, 0x0a, 0x1c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x10, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x73, 0x22, 0x7e, + 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x3f, + 0x0a, 0x14, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, + 0xec, 0x02, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, + 0x62, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x12, 0x29, 0x0a, 0x06, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x6f, 0x6d, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x45, 0x6f, 0x6d, 0x52, 0x03, 0x65, 0x6f, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8e, + 0x02, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x75, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x09, 0x75, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x48, 0x00, 0x52, 0x09, 0x73, 0x69, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 
0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x1c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x19, 0x0a, + 0x03, 0x45, 0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x44, 0x0a, 0x19, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x5f, + 0x0a, 0x17, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, + 0x42, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x11, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x52, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x7e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x37, + 0x0a, 0x09, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x65, + 
0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x09, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x22, 0x3f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x51, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x73, 0x74, 0x2a, 0x47, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x19, 0x0a, + 0x15, 0x4e, 0x4f, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x0e, 0x56, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0a, 0x0a, + 0x06, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x45, 0x52, + 0x53, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x52, 0x49, 0x45, 0x46, 0x10, 0x02, 0x2a, + 0x41, 0x0a, 0x0c, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x58, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x49, 0x45, 0x54, 0x46, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, + 0x10, 0x03, 0x32, 0xfc, 0x03, 0x0a, 0x13, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x70, 0x65, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x69, 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x43, 0x61, 0x6e, 
0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x19, 0x67, 0x65, + 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x6c, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x25, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x10, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oc_oc_proto_rawDescOnce sync.Once + file_oc_oc_proto_rawDescData = file_oc_oc_proto_rawDesc +) + +func file_oc_oc_proto_rawDescGZIP() []byte { + file_oc_oc_proto_rawDescOnce.Do(func() { + file_oc_oc_proto_rawDescData = protoimpl.X.CompressGZIP(file_oc_oc_proto_rawDescData) + }) + return file_oc_oc_proto_rawDescData +} + +var file_oc_oc_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_oc_oc_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_oc_oc_proto_goTypes = []interface{}{ + (ReturnCode)(0), // 0: telemetry.ReturnCode + (VerbosityLevel)(0), // 1: telemetry.VerbosityLevel + (EncodingType)(0), // 2: telemetry.EncodingType + (*SubscriptionRequest)(nil), // 3: telemetry.SubscriptionRequest + (*SubscriptionInput)(nil), // 4: telemetry.SubscriptionInput + (*Collector)(nil), // 5: telemetry.Collector + (*Path)(nil), // 6: telemetry.Path + (*SubscriptionAdditionalConfig)(nil), // 7: telemetry.SubscriptionAdditionalConfig + (*SubscriptionReply)(nil), // 8: telemetry.SubscriptionReply + (*SubscriptionResponse)(nil), // 9: telemetry.SubscriptionResponse + (*OpenConfigData)(nil), // 10: telemetry.OpenConfigData + (*KeyValue)(nil), // 11: telemetry.KeyValue + (*Delete)(nil), // 12: telemetry.Delete + (*Eom)(nil), // 13: telemetry.Eom + (*CancelSubscriptionRequest)(nil), // 14: telemetry.CancelSubscriptionRequest + (*CancelSubscriptionReply)(nil), // 15: telemetry.CancelSubscriptionReply + (*GetSubscriptionsRequest)(nil), 
// 16: telemetry.GetSubscriptionsRequest + (*GetSubscriptionsReply)(nil), // 17: telemetry.GetSubscriptionsReply + (*GetOperationalStateRequest)(nil), // 18: telemetry.GetOperationalStateRequest + (*GetOperationalStateReply)(nil), // 19: telemetry.GetOperationalStateReply + (*DataEncodingRequest)(nil), // 20: telemetry.DataEncodingRequest + (*DataEncodingReply)(nil), // 21: telemetry.DataEncodingReply +} +var file_oc_oc_proto_depIdxs = []int32{ + 4, // 0: telemetry.SubscriptionRequest.input:type_name -> telemetry.SubscriptionInput + 6, // 1: telemetry.SubscriptionRequest.path_list:type_name -> telemetry.Path + 7, // 2: telemetry.SubscriptionRequest.additional_config:type_name -> telemetry.SubscriptionAdditionalConfig + 5, // 3: telemetry.SubscriptionInput.collector_list:type_name -> telemetry.Collector + 9, // 4: telemetry.SubscriptionReply.response:type_name -> telemetry.SubscriptionResponse + 6, // 5: telemetry.SubscriptionReply.path_list:type_name -> telemetry.Path + 11, // 6: telemetry.OpenConfigData.kv:type_name -> telemetry.KeyValue + 12, // 7: telemetry.OpenConfigData.delete:type_name -> telemetry.Delete + 13, // 8: telemetry.OpenConfigData.eom:type_name -> telemetry.Eom + 0, // 9: telemetry.CancelSubscriptionReply.code:type_name -> telemetry.ReturnCode + 8, // 10: telemetry.GetSubscriptionsReply.subscription_list:type_name -> telemetry.SubscriptionReply + 1, // 11: telemetry.GetOperationalStateRequest.verbosity:type_name -> telemetry.VerbosityLevel + 11, // 12: telemetry.GetOperationalStateReply.kv:type_name -> telemetry.KeyValue + 2, // 13: telemetry.DataEncodingReply.encoding_list:type_name -> telemetry.EncodingType + 3, // 14: telemetry.OpenConfigTelemetry.telemetrySubscribe:input_type -> telemetry.SubscriptionRequest + 14, // 15: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:input_type -> telemetry.CancelSubscriptionRequest + 16, // 16: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:input_type -> telemetry.GetSubscriptionsRequest + 18, // 17: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:input_type -> telemetry.GetOperationalStateRequest + 20, // 18: telemetry.OpenConfigTelemetry.getDataEncodings:input_type -> telemetry.DataEncodingRequest + 10, // 19: telemetry.OpenConfigTelemetry.telemetrySubscribe:output_type -> telemetry.OpenConfigData + 15, // 20: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:output_type -> telemetry.CancelSubscriptionReply + 17, // 21: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:output_type -> telemetry.GetSubscriptionsReply + 19, // 22: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:output_type -> telemetry.GetOperationalStateReply + 21, // 23: telemetry.OpenConfigTelemetry.getDataEncodings:output_type -> telemetry.DataEncodingReply + 19, // [19:24] is the sub-list for method output_type + 14, // [14:19] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_oc_oc_proto_init() } +func file_oc_oc_proto_init() { + if File_oc_oc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oc_oc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*SubscriptionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionAdditionalConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenConfigData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Delete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Eom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetOperationalStateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationalStateReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + file_oc_oc_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*KeyValue_DoubleValue)(nil), + (*KeyValue_IntValue)(nil), + (*KeyValue_UintValue)(nil), + (*KeyValue_SintValue)(nil), + (*KeyValue_BoolValue)(nil), + (*KeyValue_StrValue)(nil), + (*KeyValue_BytesValue)(nil), } - return interceptor(ctx, in, info, handler) -} - -var _OpenConfigTelemetry_serviceDesc = grpc.ServiceDesc{ - ServiceName: "telemetry.OpenConfigTelemetry", - HandlerType: (*OpenConfigTelemetryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "cancelTelemetrySubscription", - Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, - }, - { - MethodName: "getTelemetrySubscriptions", - Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, - }, - { - MethodName: "getTelemetryOperationalState", - Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, - }, - { - MethodName: "getDataEncodings", - Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "telemetrySubscribe", - Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, - ServerStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oc_oc_proto_rawDesc, + NumEnums: 3, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "oc.proto", -} - -func init() { proto.RegisterFile("oc.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x25, 0xd9, 0x12, 0xaf, 0x7e, 0x42, 0x8d, 0xe3, 0x2f, 0xb2, 0xa3, 0xaf, 0x71, 0xe8, - 0x16, 0x71, 0x82, 0xd4, 0x28, 0x94, 0x45, 0x51, 0xa4, 0x40, 0x10, 0xcb, 0x74, 0xac, 0xc6, 0x95, - 0xdc, 0xa1, 0x9c, 0xb6, 0x2b, 0x82, 0x22, 0x27, 0x36, 0x11, 0xfe, 0x95, 0x33, 0x12, 0xc2, 0x4d, - 0x9e, 0xa0, 0xe8, 0x9b, 0x75, 0xdd, 0x97, 0xe8, 0x23, 0x74, 0x51, 0xcc, 0x90, 0x94, 0x46, 0x89, - 0x94, 0x34, 0x2b, 0x91, 0xe7, 0x9e, 0xb9, 0xf7, 0xcc, 0xbd, 0x67, 0x86, 0x82, 0x7a, 0xe4, 0x1c, - 0xc7, 0x49, 0xc4, 0x22, 0xa4, 0x32, 0xe2, 0x93, 0x80, 0xb0, 0x24, 0xd5, 0xff, 0x54, 0x60, 0xc7, - 0x9c, 0x4d, 0xa9, 0x93, 0x78, 0x31, 0xf3, 0xa2, 0x10, 0x93, 0xdf, 0x66, 0x84, 0x32, 0xd4, 0x87, - 0x2d, 0x2f, 0x8c, 0x67, 0xac, 0xab, 0x1c, 0x28, 0x47, 0x8d, 0x7e, 0xef, 0x78, 0xb1, 0xe4, 0x58, 
- 0xa6, 0x0f, 0x39, 0x07, 0x67, 0x54, 0xf4, 0x18, 0xd4, 0xd8, 0x66, 0x37, 0x96, 0xef, 0x51, 0xd6, - 0x2d, 0x1f, 0x54, 0x8e, 0x1a, 0xfd, 0x5b, 0xd2, 0xba, 0x4b, 0x9b, 0xdd, 0xe0, 0x3a, 0x67, 0x5c, - 0x78, 0x94, 0xa1, 0x09, 0x74, 0x6c, 0xd7, 0xf5, 0x78, 0x16, 0xdb, 0xb7, 0x9c, 0x28, 0x7c, 0xed, - 0x5d, 0x77, 0x2b, 0xa2, 0xda, 0x83, 0x0d, 0xd5, 0x9e, 0x2f, 0xf8, 0x03, 0x41, 0xc7, 0x9a, 0xfd, - 0x1e, 0xa2, 0x5f, 0x42, 0xe7, 0x03, 0x7d, 0xe8, 0x29, 0xb4, 0x9d, 0xc8, 0xf7, 0x89, 0xc3, 0xa2, - 0x24, 0x53, 0xa7, 0x08, 0x75, 0xb7, 0xa5, 0x3a, 0x83, 0x82, 0x80, 0x5b, 0x0b, 0x2e, 0xd7, 0xa9, - 0x7f, 0x07, 0xea, 0x22, 0x86, 0xba, 0x50, 0xb3, 0x5d, 0x37, 0x21, 0x94, 0x8a, 0xc6, 0xa8, 0xb8, - 0x78, 0x45, 0x08, 0xaa, 0x71, 0x94, 0xf0, 0x7d, 0x2b, 0x47, 0x2d, 0x2c, 0x9e, 0xf5, 0xbf, 0x14, - 0xa8, 0xf2, 0x5d, 0x8b, 0xa0, 0xcd, 0x6e, 0xf2, 0x35, 0xe2, 0x19, 0xfd, 0x0f, 0xb6, 0x5f, 0x7b, - 0x3e, 0x23, 0x89, 0x58, 0xa2, 0xe2, 0xfc, 0x0d, 0x7d, 0x0d, 0x88, 0xce, 0xe2, 0x98, 0x27, 0xb5, - 0x66, 0xa1, 0x73, 0x63, 0x87, 0xd7, 0xc4, 0x15, 0x8d, 0xa9, 0xe3, 0x4e, 0x11, 0xb9, 0x2a, 0x02, - 0xe8, 0x18, 0x76, 0x02, 0xfb, 0xad, 0x45, 0x3d, 0x9f, 0x84, 0xcc, 0xf2, 0x42, 0x46, 0x92, 0xb9, - 0xed, 0x77, 0xab, 0x42, 0x46, 0x27, 0xb0, 0xdf, 0x9a, 0x22, 0x32, 0xcc, 0x03, 0xe8, 0x21, 0x68, - 0xd4, 0x0e, 0x62, 0x9f, 0x58, 0xaf, 0x13, 0x3e, 0xeb, 0xd0, 0x49, 0xbb, 0x5b, 0x82, 0x7c, 0x2b, - 0xc3, 0xcf, 0x0a, 0x18, 0xed, 0x41, 0x3d, 0x24, 0xc4, 0xb5, 0x48, 0x14, 0x74, 0xb7, 0x45, 0xfd, - 0x1a, 0x7f, 0x37, 0xa2, 0x40, 0xff, 0x5d, 0x81, 0xde, 0xc7, 0x26, 0x83, 0x0e, 0xa1, 0xe5, 0x7b, - 0x81, 0xc7, 0xac, 0x84, 0x38, 0x51, 0xe2, 0x66, 0xed, 0xda, 0xc2, 0x4d, 0x01, 0xe2, 0x0c, 0x43, - 0x8f, 0x01, 0x65, 0x24, 0xe6, 0x05, 0xc4, 0xa2, 0xc4, 0x89, 0x42, 0x97, 0x8a, 0x76, 0x6c, 0x61, - 0x4d, 0x44, 0x26, 0x5e, 0x40, 0xcc, 0x0c, 0x97, 0xe4, 0xd0, 0xbc, 0x1d, 0xb9, 0x1c, 0xaa, 0xbf, - 0x5b, 0x9d, 0x3a, 0x26, 0xb1, 0x9f, 0xa2, 0xa7, 0x50, 0x4f, 0x08, 0x8d, 0xa3, 0x90, 0x92, 0xdc, - 0xc5, 0xf7, 0x36, 0xf8, 0x0a, 0xe7, 0x34, 0xbc, 0x58, 0xf0, 0x79, 0x5e, 0xd6, 0x9f, 0xc1, 0xed, - 0x75, 0xf9, 0xd0, 0x03, 0xb8, 0x45, 0x25, 0xdc, 0xf2, 0x5c, 0xa1, 0xa4, 0x85, 0xdb, 0x32, 0x3c, - 0x74, 0xf5, 0xbf, 0xcb, 0xd0, 0x1e, 0xc7, 0x24, 0xcc, 0xba, 0x77, 0x6a, 0x33, 0x1b, 0xdd, 0x05, - 0x95, 0xa6, 0x94, 0x91, 0xa0, 0x58, 0xa5, 0xe2, 0x7a, 0x06, 0x0c, 0x5d, 0x74, 0x1f, 0x9a, 0x4e, - 0x14, 0xc4, 0x51, 0x28, 0x86, 0xee, 0xe6, 0xae, 0x6b, 0x2c, 0xb0, 0xa1, 0x8b, 0x8e, 0x40, 0xa3, - 0xb3, 0xa9, 0xb5, 0x42, 0xab, 0x2c, 0x8a, 0x0f, 0x24, 0x66, 0xe1, 0xce, 0xaa, 0xe4, 0x4e, 0xae, - 0x3c, 0xf3, 0x01, 0xb1, 0xc2, 0x59, 0x30, 0x25, 0x89, 0x70, 0x49, 0x15, 0xb7, 0x0b, 0x78, 0x24, - 0x50, 0xd4, 0x03, 0x95, 0x4f, 0x8f, 0x32, 0x3b, 0x88, 0x85, 0x4b, 0xaa, 0x78, 0x09, 0xa0, 0x43, - 0x28, 0xbf, 0x99, 0x77, 0x6b, 0xa2, 0x7f, 0x3b, 0x52, 0xff, 0x5e, 0x92, 0xf4, 0x95, 0xed, 0xcf, - 0x08, 0x2e, 0xbf, 0x99, 0xa3, 0x87, 0xb0, 0xed, 0x12, 0x9f, 0x30, 0xd2, 0xad, 0x0b, 0x62, 0x47, - 0x22, 0x9e, 0x8a, 0x00, 0xce, 0x09, 0xe8, 0x00, 0x2a, 0xdc, 0x8d, 0xaa, 0xe0, 0xb5, 0x25, 0x9e, - 0x11, 0x05, 0x98, 0x87, 0xb8, 0xf1, 0x68, 0x1a, 0x3a, 0xd6, 0x62, 0xf4, 0x20, 0xac, 0xd2, 0xe4, - 0x60, 0x31, 0x17, 0xfd, 0x8f, 0x32, 0xd4, 0x0b, 0x09, 0x48, 0x83, 0xca, 0x1b, 0x92, 0xe6, 0x2d, - 0xe6, 0x8f, 0xe8, 0x10, 0x9a, 0x6e, 0x34, 0x9b, 0xfa, 0xc4, 0x9a, 0x73, 0x86, 0xd8, 0xb9, 0x72, - 0x5e, 0xc2, 0x8d, 0x0c, 0xcd, 0x96, 0xfd, 0x1f, 0x54, 0x2f, 0x64, 0x39, 0x83, 0x6f, 0xbc, 0x72, - 0x5e, 0xc2, 0x75, 0x2f, 0x64, 0x59, 0xf8, 0x1e, 0xc0, 0x6c, 0x19, 0xaf, 0xf1, 0xc6, 0x9c, 0x97, - 0xb0, 0x3a, 0x93, 0x09, 
0x74, 0x49, 0xa8, 0x1f, 0x28, 0x47, 0x88, 0x13, 0xa8, 0x4c, 0x98, 0x46, - 0x91, 0x9f, 0x13, 0x54, 0xbe, 0x0d, 0x4e, 0xe0, 0xd8, 0x42, 0x01, 0x65, 0x49, 0x1e, 0xe7, 0xdb, - 0x54, 0xb9, 0x02, 0xca, 0x92, 0x2c, 0x7c, 0x1f, 0x1a, 0xd3, 0x94, 0x11, 0x9a, 0x13, 0x1a, 0x07, - 0xca, 0x51, 0xf3, 0xbc, 0x84, 0x41, 0x80, 0x82, 0x72, 0x52, 0x83, 0x2d, 0x11, 0xd4, 0x7b, 0xb0, - 0x9d, 0x75, 0x7a, 0xdd, 0x55, 0xa5, 0xef, 0x41, 0xc5, 0x88, 0x82, 0xb5, 0xa1, 0x53, 0xd8, 0x1b, - 0xd8, 0xa1, 0x43, 0xfc, 0x75, 0x1f, 0x91, 0xff, 0x6c, 0x7f, 0x0b, 0xee, 0xac, 0xcb, 0xc2, 0x4f, - 0xf1, 0x43, 0xa8, 0x3a, 0x91, 0x9b, 0x9d, 0xe0, 0x76, 0x7f, 0x57, 0x1a, 0x39, 0x26, 0x6c, 0x96, - 0x84, 0x83, 0xc8, 0x25, 0x58, 0x50, 0xf8, 0x05, 0xc1, 0x7f, 0x2d, 0xca, 0x8a, 0x3b, 0xb5, 0xc6, - 0xdf, 0x4d, 0x96, 0xe8, 0x27, 0x70, 0xe7, 0x05, 0x61, 0x72, 0x76, 0xfa, 0xd9, 0x22, 0xa7, 0xb0, - 0xfb, 0x61, 0x0e, 0x2e, 0x71, 0x08, 0x9d, 0x95, 0x0c, 0xd2, 0x17, 0xa6, 0xb7, 0xf1, 0xc6, 0x89, - 0xfd, 0x14, 0x6b, 0xf2, 0x32, 0x71, 0x91, 0xbc, 0x83, 0xfd, 0x17, 0x84, 0x8d, 0x63, 0x92, 0xd8, - 0xd9, 0x75, 0x6a, 0x32, 0x9b, 0x91, 0xcf, 0x95, 0x8a, 0xbe, 0x05, 0x75, 0x4e, 0x92, 0x69, 0x44, - 0x3d, 0x96, 0x8a, 0x56, 0xb4, 0xfb, 0x7b, 0x92, 0x92, 0x57, 0x45, 0xec, 0x82, 0xcc, 0x89, 0x8f, - 0x97, 0x5c, 0xfd, 0x19, 0x74, 0xd7, 0xd6, 0xe7, 0xdb, 0xcc, 0xce, 0xb2, 0xf2, 0xd1, 0xb3, 0xac, - 0xef, 0xc2, 0x0e, 0xbf, 0xbd, 0x8c, 0xd0, 0x89, 0x5c, 0x2f, 0xbc, 0xce, 0x95, 0xeb, 0x3f, 0x41, - 0x67, 0x15, 0xe6, 0x09, 0xbf, 0x87, 0x16, 0xc9, 0x81, 0x65, 0xcf, 0xda, 0xfd, 0x3b, 0xf2, 0xb1, - 0xce, 0xe3, 0x93, 0x34, 0x26, 0xb8, 0x59, 0xb0, 0x79, 0xab, 0x1e, 0xbd, 0x00, 0x58, 0x3a, 0x00, - 0x35, 0xa0, 0x66, 0x5e, 0x0d, 0x06, 0x86, 0x69, 0x6a, 0x25, 0xb4, 0x07, 0xbb, 0xa3, 0xb1, 0x65, - 0x5e, 0x9d, 0x98, 0x03, 0x3c, 0xbc, 0x9c, 0x0c, 0xc7, 0x23, 0xcb, 0x18, 0x4d, 0xf0, 0xaf, 0x9a, - 0x82, 0x3a, 0xd0, 0xba, 0x1a, 0xbd, 0x1c, 0x8d, 0x7f, 0x1e, 0x59, 0x06, 0xc6, 0x63, 0xac, 0x95, - 0x1f, 0xf5, 0xa1, 0xbd, 0xda, 0x10, 0x04, 0xb0, 0x7d, 0x6a, 0x4c, 0x9e, 0x0f, 0x2f, 0xb4, 0x12, - 0x52, 0x61, 0x6b, 0x62, 0x60, 0xd3, 0xd0, 0x14, 0xfe, 0x78, 0x82, 0x87, 0xc6, 0x99, 0x56, 0x7e, - 0xf4, 0x1c, 0x9a, 0xb2, 0x34, 0xd4, 0x02, 0xf5, 0x6a, 0x74, 0x6a, 0x9c, 0x0d, 0x47, 0xc6, 0xa9, - 0x56, 0x42, 0x35, 0xa8, 0xfc, 0xf2, 0xe3, 0x85, 0xa6, 0x70, 0xfc, 0x07, 0x73, 0x3c, 0xb2, 0x86, - 0xc6, 0xe4, 0x4c, 0x2b, 0xf3, 0xc4, 0x97, 0x78, 0x3c, 0x19, 0x3f, 0xd1, 0x2a, 0xfd, 0x7f, 0x2a, - 0xb0, 0xb3, 0xbc, 0xf2, 0x27, 0xc5, 0x96, 0x91, 0x09, 0x68, 0xb1, 0xff, 0xdc, 0x32, 0x53, 0x82, - 0xbe, 0xd8, 0x68, 0x24, 0xd1, 0xe0, 0x7d, 0x79, 0xbc, 0xab, 0x1f, 0x12, 0xbd, 0xf4, 0x8d, 0x82, - 0x3c, 0xb8, 0xeb, 0x88, 0x03, 0x36, 0x79, 0x2f, 0xb5, 0x48, 0x82, 0xbe, 0x94, 0xff, 0x08, 0x6d, - 0x3a, 0xce, 0xfb, 0xfa, 0x27, 0x58, 0xb1, 0x9f, 0xea, 0x25, 0xe4, 0xc0, 0xde, 0x35, 0x61, 0x6b, - 0xeb, 0x50, 0x24, 0xa7, 0xd8, 0x70, 0x20, 0xf7, 0x0f, 0x3e, 0xca, 0xc9, 0x8a, 0xf8, 0xd0, 0x93, - 0x8b, 0xbc, 0x6f, 0x58, 0xf4, 0xd5, 0x6a, 0x8e, 0x0d, 0x07, 0x6a, 0xff, 0xf0, 0x53, 0xb4, 0xac, - 0x1a, 0x06, 0xed, 0x9a, 0x30, 0xd9, 0xc0, 0x74, 0x65, 0x20, 0x6b, 0x1c, 0xbf, 0xdf, 0xdb, 0x18, - 0x17, 0x39, 0xa7, 0xdb, 0xe2, 0xaf, 0xf8, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xe3, - 0x4f, 0x0d, 0x96, 0x0b, 0x00, 0x00, + GoTypes: file_oc_oc_proto_goTypes, + DependencyIndexes: file_oc_oc_proto_depIdxs, + EnumInfos: file_oc_oc_proto_enumTypes, + MessageInfos: file_oc_oc_proto_msgTypes, + }.Build() + File_oc_oc_proto = out.File + file_oc_oc_proto_rawDesc = nil + file_oc_oc_proto_goTypes = nil + file_oc_oc_proto_depIdxs = nil 
} diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index cf4aa145e6911..8c3ad32b9913f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -36,6 +36,7 @@ syntax = "proto3"; package telemetry; +option go_package = ".;telemetry"; // Interface exported by Agent service OpenConfigTelemetry { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go new file mode 100644 index 0000000000000..593e5a1e1002a --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package telemetry + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// OpenConfigTelemetryClient is the client API for OpenConfigTelemetry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OpenConfigTelemetryClient interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. + GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) +} + +type openConfigTelemetryClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenConfigTelemetryClient(cc grpc.ClientConnInterface) OpenConfigTelemetryClient { + return &openConfigTelemetryClient{cc} +} + +func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenConfigTelemetry_ServiceDesc.Streams[0], "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) 
+ if err != nil { + return nil, err + } + x := &openConfigTelemetryTelemetrySubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenConfigTelemetry_TelemetrySubscribeClient interface { + Recv() (*OpenConfigData, error) + grpc.ClientStream +} + +type openConfigTelemetryTelemetrySubscribeClient struct { + grpc.ClientStream +} + +func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { + m := new(OpenConfigData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { + out := new(CancelSubscriptionReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { + out := new(GetSubscriptionsReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { + out := new(GetOperationalStateReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { + out := new(DataEncodingReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OpenConfigTelemetryServer is the server API for OpenConfigTelemetry service. +// All implementations must embed UnimplementedOpenConfigTelemetryServer +// for forward compatibility +type OpenConfigTelemetryServer interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. 
+ GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +// UnimplementedOpenConfigTelemetryServer must be embedded to have forward compatible implementations. +type UnimplementedOpenConfigTelemetryServer struct { +} + +func (UnimplementedOpenConfigTelemetryServer) TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method TelemetrySubscribe not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelTelemetrySubscription not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetrySubscriptions not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetryOperationalState not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDataEncodings not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) mustEmbedUnimplementedOpenConfigTelemetryServer() {} + +// UnsafeOpenConfigTelemetryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenConfigTelemetryServer will +// result in compilation errors. 
+type UnsafeOpenConfigTelemetryServer interface { + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +func RegisterOpenConfigTelemetryServer(s grpc.ServiceRegistrar, srv OpenConfigTelemetryServer) { + s.RegisterService(&OpenConfigTelemetry_ServiceDesc, srv) +} + +func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) +} + +type OpenConfigTelemetry_TelemetrySubscribeServer interface { + Send(*OpenConfigData) error + grpc.ServerStream +} + +type openConfigTelemetryTelemetrySubscribeServer struct { + grpc.ServerStream +} + +func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { + return x.ServerStream.SendMsg(m) +} + +func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationalStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataEncodingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getDataEncodings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OpenConfigTelemetry_ServiceDesc is the grpc.ServiceDesc for OpenConfigTelemetry service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenConfigTelemetry_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "telemetry.OpenConfigTelemetry", + HandlerType: (*OpenConfigTelemetryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "cancelTelemetrySubscription", + Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, + }, + { + MethodName: "getTelemetrySubscriptions", + Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, + }, + { + MethodName: "getTelemetryOperationalState", + Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, + }, + { + MethodName: "getDataEncodings", + Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "telemetrySubscribe", + Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "oc/oc.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 0c6fc9e052d43..b95930cd42f87 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -8,29 +8,30 @@ import ( "sync" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" - "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + internaltls "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" + "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" ) type OpenConfigTelemetry struct { - Servers []string `toml:"servers"` - Sensors []string `toml:"sensors"` - Username string `toml:"username"` - Password string `toml:"password"` - ClientID string `toml:"client_id"` - SampleFrequency internal.Duration `toml:"sample_frequency"` - StrAsTags bool `toml:"str_as_tags"` - RetryDelay internal.Duration `toml:"retry_delay"` - EnableTLS bool `toml:"enable_tls"` + Servers []string `toml:"servers"` + Sensors []string `toml:"sensors"` + Username string `toml:"username"` + Password string `toml:"password"` + ClientID string `toml:"client_id"` + SampleFrequency config.Duration `toml:"sample_frequency"` + StrAsTags bool `toml:"str_as_tags"` + RetryDelay config.Duration `toml:"retry_delay"` + EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig Log telegraf.Logger @@ -42,7 +43,7 @@ type 
OpenConfigTelemetry struct { var ( // Regex to match and extract data points from path value in received key - keyPathRegex = regexp.MustCompile("\\/([^\\/]*)\\[([A-Za-z0-9\\-\\/]*\\=[^\\[]*)\\]") + keyPathRegex = regexp.MustCompile(`/([^/]*)\[([A-Za-z0-9\-/]*=[^\[]*)]`) sampleConfig = ` ## List of device addresses to collect telemetry from servers = ["localhost:1883"] @@ -102,7 +103,7 @@ func (m *OpenConfigTelemetry) Description() string { return "Read JTI OpenConfig Telemetry from listed sensors" } -func (m *OpenConfigTelemetry) Gather(acc telegraf.Accumulator) error { +func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error { return nil } @@ -169,25 +170,18 @@ func (m *OpenConfigTelemetry) extractData(r *telemetry.OpenConfigData, grpcServe } else { kv[xmlpath] = v.GetStrValue() } - break case *telemetry.KeyValue_DoubleValue: kv[xmlpath] = v.GetDoubleValue() - break case *telemetry.KeyValue_IntValue: kv[xmlpath] = v.GetIntValue() - break case *telemetry.KeyValue_UintValue: kv[xmlpath] = v.GetUintValue() - break case *telemetry.KeyValue_SintValue: kv[xmlpath] = v.GetSintValue() - break case *telemetry.KeyValue_BoolValue: kv[xmlpath] = v.GetBoolValue() - break case *telemetry.KeyValue_BytesValue: kv[xmlpath] = v.GetBytesValue() - break } // Insert other tags from message @@ -226,7 +220,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { m.sensorsConfig = make([]sensorConfig, 0) for _, sensor := range m.Sensors { spathSplit := strings.Fields(sensor) - reportingRate = uint32(m.SampleFrequency.Duration / time.Millisecond) + reportingRate = uint32(time.Duration(m.SampleFrequency) / time.Millisecond) // Extract measurement name and custom reporting rate if specified. Custom // reporting rate will be specified at the beginning of sensor list, @@ -272,16 +266,18 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { m.sensorsConfig = append(m.sensorsConfig, sensorConfig{ measurementName: measurementName, pathList: pathlist, }) - } return len(m.sensorsConfig) } // Subscribes and collects OpenConfig telemetry data from given server -func (m *OpenConfigTelemetry) collectData(ctx context.Context, - grpcServer string, grpcClientConn *grpc.ClientConn, - acc telegraf.Accumulator) error { +func (m *OpenConfigTelemetry) collectData( + ctx context.Context, + grpcServer string, + grpcClientConn *grpc.ClientConn, + acc telegraf.Accumulator, +) { c := telemetry.NewOpenConfigTelemetryClient(grpcClientConn) for _, sensor := range m.sensorsConfig { m.wg.Add(1) @@ -298,17 +294,15 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer, err)) return - } else { - // Retry with delay. If delay is not provided, use default - if m.RetryDelay.Duration > 0 { - m.Log.Debugf("Retrying %s with timeout %v", grpcServer, - m.RetryDelay.Duration) - time.Sleep(m.RetryDelay.Duration) - continue - } else { - return - } } + + // Retry with delay. 
If no delay is configured, give up + if time.Duration(m.RetryDelay) > 0 { + m.Log.Debugf("Retrying %s with timeout %v", grpcServer, time.Duration(m.RetryDelay)) + time.Sleep(time.Duration(m.RetryDelay)) + continue + } + return } for { r, err := stream.Recv() @@ -345,8 +339,6 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, } }(ctx, sensor) } - - return nil } func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { @@ -417,7 +409,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { func init() { inputs.Add("jti_openconfig_telemetry", func() telegraf.Input { return &OpenConfigTelemetry{ - RetryDelay: internal.Duration{Duration: time.Second}, + RetryDelay: config.Duration(time.Second), StrAsTags: false, } }) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index a3df62e1bb0c0..9fed6a324bf34 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -19,7 +19,7 @@ import ( var cfg = &OpenConfigTelemetry{ Log: testutil.Logger{}, Servers: []string{"127.0.0.1:50051"}, - SampleFrequency: internal.Duration{Duration: time.Second * 2}, + SampleFrequency: config.Duration(time.Second * 2), } var data = &telemetry.OpenConfigData{ @@ -27,55 +27,57 @@ var data = &telemetry.OpenConfigData{ Kv: []*telemetry.KeyValue{{Key: "/sensor[tag='tagValue']/intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_prefix = &telemetry.OpenConfigData{ +var dataWithPrefix = &telemetry.OpenConfigData{ Path: "/sensor_with_prefix", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_multiple_tags = &telemetry.OpenConfigData{ +var dataWithMultipleTags = &telemetry.OpenConfigData{ Path: "/sensor_with_multiple_tags", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "tagKey[tag='tagValue']/boolKey", Value: &telemetry.KeyValue_BoolValue{BoolValue: false}}, {Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_string_values = &telemetry.OpenConfigData{ +var dataWithStringValues = &telemetry.OpenConfigData{ Path: "/sensor_with_string_values", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "strKey[tag='tagValue']/strValue", Value: &telemetry.KeyValue_StrValue{StrValue: "10"}}}, } type openConfigTelemetryServer struct { + telemetry.UnimplementedOpenConfigTelemetryServer } func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { path := req.PathList[0].Path - if path == "/sensor" { - stream.Send(data) - } else if path == "/sensor_with_prefix" { - stream.Send(data_with_prefix) - } else if path == "/sensor_with_multiple_tags" { - stream.Send(data_with_multiple_tags) - } else if path == "/sensor_with_string_values" { -
stream.Send(data_with_string_values) + switch path { + case "/sensor": + return stream.Send(data) + case "/sensor_with_prefix": + return stream.Send(dataWithPrefix) + case "/sensor_with_multiple_tags": + return stream.Send(dataWithMultipleTags) + case "/sensor_with_string_values": + return stream.Send(dataWithStringValues) } return nil } -func (s *openConfigTelemetryServer) CancelTelemetrySubscription(ctx context.Context, req *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) { +func (s *openConfigTelemetryServer) CancelTelemetrySubscription(_ context.Context, _ *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(ctx context.Context, req *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) { +func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(_ context.Context, _ *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetryOperationalState(ctx context.Context, req *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) { +func (s *openConfigTelemetryServer) GetTelemetryOperationalState(_ context.Context, _ *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetDataEncodings(ctx context.Context, req *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { +func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { return nil, nil } @@ -219,6 +221,8 @@ func TestMain(m *testing.M) { grpcServer := grpc.NewServer(opts...) telemetry.RegisterOpenConfigTelemetryServer(grpcServer, newServer()) go func() { + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive grpcServer.Serve(lis) }() defer grpcServer.Stop() diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index dec39cc32871b..ac04925a23d14 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -35,16 +35,41 @@ and use the old zookeeper connection method. # insecure_skip_verify = false ## SASL authentication credentials. These settings should typically be used - ## with TLS encryption enabled using the "enable_tls" option. + ## with TLS encryption enabled # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. 
+ ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 ## Initial offset position; one of "oldest" or "newest". # offset = "oldest" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 0fd7d3693d48c..4462cd016766c 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -3,16 +3,15 @@ package kafka_consumer import ( "context" "fmt" - "log" "strings" "sync" "time" "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" - "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -36,7 +35,6 @@ const sampleConfig = ` # version = "" ## Optional TLS Config - # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -44,16 +42,42 @@ const sampleConfig = ` # insecure_skip_verify = false ## SASL authentication credentials. These settings should typically be used - ## with TLS encryption enabled using the "enable_tls" option. + ## with TLS encryption enabled # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + ## Initial offset position; one of "oldest" or "newest". # offset = "oldest" @@ -83,7 +107,6 @@ const sampleConfig = ` const ( defaultMaxUndeliveredMessages = 1000 - defaultMaxMessageLen = 1000000 defaultConsumerGroup = "telegraf_metrics_consumers" reconnectDelay = 5 * time.Second ) @@ -93,7 +116,6 @@ type semaphore chan empty type KafkaConsumer struct { Brokers []string `toml:"brokers"` - ClientID string `toml:"client_id"` ConsumerGroup string `toml:"consumer_group"` MaxMessageLen int `toml:"max_message_len"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` @@ -101,13 +123,8 @@ type KafkaConsumer struct { BalanceStrategy string `toml:"balance_strategy"` Topics []string `toml:"topics"` TopicTag string `toml:"topic_tag"` - Version string `toml:"version"` - SASLPassword string `toml:"sasl_password"` - SASLUsername string `toml:"sasl_username"` - SASLVersion *int `toml:"sasl_version"` - EnableTLS *bool `toml:"enable_tls"` - tls.ClientConfig + kafka.ReadConfig Log telegraf.Logger `toml:"-"` @@ -157,58 +174,14 @@ func (k *KafkaConsumer) Init() error { } config := sarama.NewConfig() - config.Consumer.Return.Errors = true // Kafka version 0.10.2.0 is required for consumer groups. 
config.Version = sarama.V0_10_2_0 - if k.Version != "" { - version, err := sarama.ParseKafkaVersion(k.Version) - if err != nil { - return err - } - - config.Version = version - } - - if k.EnableTLS != nil && *k.EnableTLS { - config.Net.TLS.Enable = true - } - - tlsConfig, err := k.ClientConfig.TLSConfig() - if err != nil { + if err := k.SetConfig(config); err != nil { return err } - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - - // To maintain backwards compatibility, if the enable_tls option is not - // set TLS is enabled if a non-default TLS config is used. - if k.EnableTLS == nil { - k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") - config.Net.TLS.Enable = true - } - } - - if k.SASLUsername != "" && k.SASLPassword != "" { - config.Net.SASL.User = k.SASLUsername - config.Net.SASL.Password = k.SASLPassword - config.Net.SASL.Enable = true - - version, err := kafka.SASLVersion(config.Version, k.SASLVersion) - if err != nil { - return err - } - config.Net.SASL.Version = version - } - - if k.ClientID != "" { - config.ClientID = k.ClientID - } else { - config.ClientID = "Telegraf" - } - switch strings.ToLower(k.Offset) { case "oldest", "": config.Consumer.Offsets.Initial = sarama.OffsetOldest @@ -256,12 +229,14 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { go func() { defer k.wg.Done() for ctx.Err() == nil { - handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) handler.MaxMessageLen = k.MaxMessageLen handler.TopicTag = k.TopicTag err := k.consumer.Consume(ctx, k.Topics, handler) if err != nil { acc.AddError(err) + // Ignore returned error as we cannot do anything about it anyway + //nolint:errcheck,revive internal.SleepContext(ctx, reconnectDelay) } } @@ -282,7 +257,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error { +func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error { return nil } @@ -298,12 +273,13 @@ type Message struct { session sarama.ConsumerGroupSession } -func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser, log telegraf.Logger) *ConsumerGroupHandler { handler := &ConsumerGroupHandler{ acc: acc.WithTracking(maxUndelivered), sem: make(chan empty, maxUndelivered), undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), parser: parser, + log: log, } return handler } @@ -321,6 +297,8 @@ type ConsumerGroupHandler struct { mu sync.Mutex undelivered map[telegraf.TrackingID]Message + + log telegraf.Logger } // Setup is called once when a new session is opened. It setups up the handler @@ -340,11 +318,11 @@ func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { } // Run processes any delivered metrics during the lifetime of the session. -func (h *ConsumerGroupHandler) run(ctx context.Context) error { +func (h *ConsumerGroupHandler) run(ctx context.Context) { for { select { case <-ctx.Done(): - return nil + return case track := <-h.acc.Delivered(): h.onDelivery(track) } @@ -357,7 +335,7 @@ func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { msg, ok := h.undelivered[track.ID()] if !ok { - log.Printf("E! 
[inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + h.log.Errorf("Could not mark message delivered: %d", track.ID()) return } @@ -420,7 +398,7 @@ func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, for { err := h.Reserve(ctx) if err != nil { - return nil + return err } select { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 01146e180a8c8..68fd9e0627bed 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -6,11 +6,13 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type FakeConsumerGroup struct { @@ -22,10 +24,9 @@ type FakeConsumerGroup struct { errors chan error } -func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { +func (g *FakeConsumerGroup) Consume(_ context.Context, _ []string, handler sarama.ConsumerGroupHandler) error { g.handler = handler - g.handler.Setup(nil) - return nil + return g.handler.Setup(nil) } func (g *FakeConsumerGroup) Errors() <-chan error { @@ -68,8 +69,12 @@ func TestInit(t *testing.T) { { name: "parses valid version string", plugin: &KafkaConsumer{ - Version: "1.0.0", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + Version: "1.0.0", + }, + }, + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.Version, sarama.V1_0_0_0) @@ -78,16 +83,24 @@ func TestInit(t *testing.T) { { name: "invalid version string", plugin: &KafkaConsumer{ - Version: "100", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + Version: "100", + }, + }, + Log: testutil.Logger{}, }, initError: true, }, { name: "custom client_id", plugin: &KafkaConsumer{ - ClientID: "custom", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientID: "custom", + }, + }, + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.ClientID, "custom") @@ -123,8 +136,12 @@ func TestInit(t *testing.T) { { name: "default tls with a tls config", plugin: &KafkaConsumer{ - ClientConfig: tls.ClientConfig{ - InsecureSkipVerify: true, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + }, }, Log: testutil.Logger{}, }, @@ -133,24 +150,17 @@ func TestInit(t *testing.T) { }, }, { - name: "disable tls", + name: "Insecure tls", plugin: &KafkaConsumer{ - EnableTLS: func() *bool { v := false; return &v }(), - ClientConfig: tls.ClientConfig{ - InsecureSkipVerify: true, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + }, }, Log: testutil.Logger{}, }, - check: func(t *testing.T, plugin *KafkaConsumer) { - require.False(t, plugin.config.Net.TLS.Enable) - }, - }, - { - name: "enable tls", - plugin: &KafkaConsumer{ - EnableTLS: func() *bool { v := true; return &v }(), - Log: testutil.Logger{}, - }, check: func(t *testing.T, plugin *KafkaConsumer) { require.True(t, plugin.config.Net.TLS.Enable) }, @@ 
-165,6 +175,8 @@ func TestInit(t *testing.T) { require.Error(t, err) return } + // No error path + require.NoError(t, err) tt.check(t, tt.plugin) }) @@ -203,21 +215,24 @@ func (s *FakeConsumerGroupSession) GenerationID() int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { +func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { +func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { +func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) { } func (s *FakeConsumerGroupSession) Context() context.Context { return s.ctx } +func (s *FakeConsumerGroupSession) Commit() { +} + type FakeConsumerGroupClaim struct { messages chan *sarama.ConsumerMessage } @@ -244,8 +259,8 @@ func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { func TestConsumerGroupHandler_Lifecycle(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} - cg := NewConsumerGroupHandler(acc, 1, parser) + parser := value.NewValueParser("cpu", "int", "", nil) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -260,8 +275,13 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { require.NoError(t, err) cancel() - err = cg.ConsumeClaim(session, &claim) - require.NoError(t, err) + // This produces a flaky test case, probably due to a race between context cancellation and consumption. + // Furthermore, it is not clear what the outcome of this test should be... + // err = cg.ConsumeClaim(session, &claim) + //require.NoError(t, err) + // So stick with the line below for now.
+ //nolint:errcheck + cg.ConsumeClaim(session, &claim) err = cg.Cleanup(session) require.NoError(t, err) @@ -269,8 +289,8 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} - cg := NewConsumerGroupHandler(acc, 1, parser) + parser := value.NewValueParser("cpu", "int", "", nil) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -290,7 +310,8 @@ func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { go func() { err := cg.ConsumeClaim(session, claim) - require.NoError(t, err) + require.Error(t, err) + require.EqualValues(t, "context canceled", err.Error()) }() acc.Wait(1) @@ -315,11 +336,12 @@ func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { func TestConsumerGroupHandler_Handle(t *testing.T) { tests := []struct { - name string - maxMessageLen int - topicTag string - msg *sarama.ConsumerMessage - expected []telegraf.Metric + name string + maxMessageLen int + topicTag string + msg *sarama.ConsumerMessage + expected []telegraf.Metric + expectedHandleError string }{ { name: "happy path", @@ -345,7 +367,8 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { Topic: "telegraf", Value: []byte("12345"), }, - expected: []telegraf.Metric{}, + expected: []telegraf.Metric{}, + expectedHandleError: "message exceeds max_message_len (actual 5, max 4)", }, { name: "parse error", @@ -353,7 +376,8 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { Topic: "telegraf", Value: []byte("not an integer"), }, - expected: []telegraf.Metric{}, + expected: []telegraf.Metric{}, + expectedHandleError: "strconv.Atoi: parsing \"integer\": invalid syntax", }, { name: "add topic tag", @@ -379,16 +403,22 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} - cg := NewConsumerGroupHandler(acc, 1, parser) + parser := value.NewValueParser("cpu", "int", "", nil) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) cg.MaxMessageLen = tt.maxMessageLen cg.TopicTag = tt.topicTag ctx := context.Background() session := &FakeConsumerGroupSession{ctx: ctx} - cg.Reserve(ctx) - cg.Handle(session, tt.msg) + require.NoError(t, cg.Reserve(ctx)) + err := cg.Handle(session, tt.msg) + if tt.expectedHandleError != "" { + require.Error(t, err) + require.EqualValues(t, tt.expectedHandleError, err.Error()) + } else { + require.NoError(t, err) + } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 2f0c219ea8647..86ccaa4c1dc09 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,5 +1,7 @@ # Kafka Consumer Legacy Input Plugin +### Deprecated in version 1.4. Please use [Kafka Consumer input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer). + The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the line protocol. 
[Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index 939fc8850ef5f..ab19e0875820a 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -140,11 +140,11 @@ func (k *Kafka) receiver() { return case err := <-k.errs: if err != nil { - k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err)) + k.acc.AddError(fmt.Errorf("consumer Error: %s", err)) } case msg := <-k.in: if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { - k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)", + k.acc.AddError(fmt.Errorf("message longer than max_message_len (%d > %d)", len(msg.Value), k.MaxMessageLen)) } else { metrics, err := k.parser.Parse(msg.Value) @@ -161,8 +161,11 @@ func (k *Kafka) receiver() { // TODO(cam) this locking can be removed if this PR gets merged: // https://github.com/wvanbergen/kafka/pull/84 k.Lock() - k.Consumer.CommitUpto(msg) + err := k.Consumer.CommitUpto(msg) k.Unlock() + if err != nil { + k.acc.AddError(fmt.Errorf("committing to consumer failed: %v", err)) + } } } } @@ -173,11 +176,11 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error())) + k.acc.AddError(fmt.Errorf("error closing consumer: %s", err.Error())) } } -func (k *Kafka) Gather(acc telegraf.Accumulator) error { +func (k *Kafka) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 31bea2210b741..473c5b9740847 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -6,11 +6,10 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) func TestReadsMetricsFromKafka(t *testing.T) { @@ -51,7 +50,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { var acc testutil.Accumulator // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + require.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := k.Start(&acc); err != nil { t.Fatal(err.Error()) } else { @@ -65,25 +64,26 @@ func TestReadsMetricsFromKafka(t *testing.T) { require.NoError(t, err) if len(acc.Metrics) == 1 { point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ "host": "server01", "direction": "in", "region": "us-west", }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } else { t.Errorf("No points found in accumulator, expected 1") } } -// Waits for the metric that was sent to the kafka broker to arrive 
at the kafka -// consumer +//nolint:unused // Used in skipped tests +// Waits for the metric that was sent to the kafka broker to arrive at the kafka consumer func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 8037f49a053b5..ad8e372941ebb 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -4,11 +4,12 @@ import ( "strings" "testing" + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/Shopify/sarama" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -46,7 +47,7 @@ func TestRunParser(t *testing.T) { in <- saramaMsg(testMsg) acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) } // Test that the parser ignores invalid messages @@ -61,7 +62,7 @@ func TestRunParserInvalidMsg(t *testing.T) { in <- saramaMsg(invalidMsg) acc.WaitError(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } // Test that overlong messages are dropped @@ -78,7 +79,7 @@ func TestDropOverlongMsg(t *testing.T) { in <- saramaMsg(overlongMsg) acc.WaitError(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } // Test that the parser parses kafka messages into points @@ -93,9 +94,9 @@ func TestRunParserAndGather(t *testing.T) { in <- saramaMsg(testMsg) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -112,9 +113,9 @@ func TestRunParserAndGatherGraphite(t *testing.T) { in <- saramaMsg(testMsgGraphite) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -134,9 +135,9 @@ func TestRunParserAndGatherJSON(t *testing.T) { in <- saramaMsg(testMsgJSON) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 2) + require.Equal(t, acc.NFields(), 2) acc.AssertContainsFields(t, "kafka_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index dd3303a7419d3..b2e8da4cc0ef0 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -19,7 +19,7 @@ const ( type Kapacitor struct { URLs []string `toml:"urls"` - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -51,7 +51,7 @@ func (*Kapacitor) SampleConfig() string { func (k *Kapacitor) Gather(acc 
telegraf.Accumulator) error { if k.client == nil { - client, err := k.createHttpClient() + client, err := k.createHTTPClient() if err != nil { return err } @@ -73,7 +73,7 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kapacitor) createHttpClient() (*http.Client, error) { +func (k *Kapacitor) createHTTPClient() (*http.Client, error) { tlsCfg, err := k.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -83,7 +83,7 @@ func (k *Kapacitor) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: k.Timeout.Duration, + Timeout: time.Duration(k.Timeout), } return client, nil @@ -216,13 +216,10 @@ func (k *Kapacitor) gatherURL( if s.Kapacitor != nil { for _, obj := range *s.Kapacitor { - // Strip out high-cardinality or duplicative tags excludeTags := []string{"host", "cluster_id", "server_id"} for _, key := range excludeTags { - if _, ok := obj.Tags[key]; ok { - delete(obj.Tags, key) - } + delete(obj.Tags, key) } // Convert time-related string field to int @@ -250,7 +247,7 @@ func init() { inputs.Add("kapacitor", func() telegraf.Input { return &Kapacitor{ URLs: []string{defaultURL}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/kapacitor/kapacitor_test.go b/plugins/inputs/kapacitor/kapacitor_test.go index cae1f9ce30e77..163af10601f0a 100644 --- a/plugins/inputs/kapacitor/kapacitor_test.go +++ b/plugins/inputs/kapacitor/kapacitor_test.go @@ -74,7 +74,8 @@ func TestKapacitor(t *testing.T) { func TestMissingStats(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(`{}`)) + _, err := w.Write([]byte(`{}`)) + require.NoError(t, err) })) defer server.Close() @@ -83,7 +84,7 @@ func TestMissingStats(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.False(t, acc.HasField("kapacitor_memstats", "alloc_bytes")) require.True(t, acc.HasField("kapacitor", "num_tasks")) @@ -92,7 +93,8 @@ func TestMissingStats(t *testing.T) { func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte("not json")) + _, err := w.Write([]byte("not json")) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -104,7 +106,7 @@ func TestErrorHandling(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) acc.WaitError(1) require.Equal(t, uint64(0), acc.NMetrics()) } @@ -120,7 +122,7 @@ func TestErrorHandling404(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) acc.WaitError(1) require.Equal(t, uint64(0), acc.NMetrics()) } diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 461c9564a38e9..c16c68bf44bd1 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel @@ -5,7 +6,6 @@ package kernel import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -16,11 +16,11 @@ import ( // /proc/stat file line prefixes to gather stats on: var ( - interrupts = []byte("intr") - context_switches = []byte("ctxt") - processes_forked = []byte("processes") - disk_pages = []byte("page") - boot_time = []byte("btime") + interrupts = 
[]byte("intr") + contextSwitches = []byte("ctxt") + processesForked = []byte("processes") + diskPages = []byte("page") + bootTime = []byte("btime") ) type Kernel struct { @@ -35,13 +35,12 @@ func (k *Kernel) Description() string { func (k *Kernel) SampleConfig() string { return "" } func (k *Kernel) Gather(acc telegraf.Accumulator) error { - data, err := k.getProcStat() if err != nil { return err } - entropyData, err := ioutil.ReadFile(k.entropyStatFile) + entropyData, err := os.ReadFile(k.entropyStatFile) if err != nil { return err } @@ -54,7 +53,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) - fields["entropy_avail"] = int64(entropyValue) + fields["entropy_avail"] = entropyValue dataFields := bytes.Fields(data) for i, field := range dataFields { @@ -64,26 +63,26 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - fields["interrupts"] = int64(m) - case bytes.Equal(field, context_switches): + fields["interrupts"] = m + case bytes.Equal(field, contextSwitches): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["context_switches"] = int64(m) - case bytes.Equal(field, processes_forked): + fields["context_switches"] = m + case bytes.Equal(field, processesForked): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["processes_forked"] = int64(m) - case bytes.Equal(field, boot_time): + fields["processes_forked"] = m + case bytes.Equal(field, bootTime): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["boot_time"] = int64(m) - case bytes.Equal(field, disk_pages): + fields["boot_time"] = m + case bytes.Equal(field, diskPages): in, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err @@ -92,8 +91,8 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - fields["disk_pages_in"] = int64(in) - fields["disk_pages_out"] = int64(out) + fields["disk_pages_in"] = in + fields["disk_pages_out"] = out } } @@ -104,12 +103,12 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { func (k *Kernel) getProcStat() ([]byte, error) { if _, err := os.Stat(k.statFile); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel: %s does not exist!", k.statFile) + return nil, fmt.Errorf("kernel: %s does not exist", k.statFile) } else if err != nil { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel/kernel_notlinux.go b/plugins/inputs/kernel/kernel_notlinux.go index 05f6e55c453c5..838a97071a6d4 100644 --- a/plugins/inputs/kernel/kernel_notlinux.go +++ b/plugins/inputs/kernel/kernel_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index d356f43802798..f174017fad7b9 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -1,20 +1,20 @@ +//go:build linux // +build linux package kernel import ( - "io/ioutil" "os" "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFullProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Full)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Full)) + tmpfile := makeFakeStatFile(t, []byte(statFileFull)) + 
tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFileFull)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -24,8 +24,7 @@ func TestFullProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "boot_time": int64(1457505775), @@ -40,8 +39,8 @@ func TestFullProcFile(t *testing.T) { } func TestPartialProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Partial)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Partial)) + tmpfile := makeFakeStatFile(t, []byte(statFilePartial)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFilePartial)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -51,8 +50,7 @@ func TestPartialProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "boot_time": int64(1457505775), @@ -66,8 +64,8 @@ func TestPartialProcFile(t *testing.T) { } func TestInvalidProcFile1(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Invalid)) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFileInvalid)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -78,11 +76,12 @@ func TestInvalidProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") } func TestInvalidProcFile2(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid2)) defer os.Remove(tmpfile) k := Kernel{ @@ -91,12 +90,13 @@ func TestInvalidProcFile2(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "no such file") } func TestNoProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) - os.Remove(tmpfile) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid2)) + require.NoError(t, os.Remove(tmpfile)) k := Kernel{ statFile: tmpfile, @@ -104,11 +104,11 @@ func TestNoProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "does not exist") + require.Error(t, err) + require.Contains(t, err.Error(), "does not exist") } -const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileFull = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -122,7 +122,7 @@ swap 1 0 entropy_avail 1024 ` -const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFilePartial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 
42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -134,7 +134,7 @@ page 5741 1808 ` // missing btime measurement -const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileInvalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -149,7 +149,7 @@ entropy_avail 1024 ` // missing second page measurement -const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileInvalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -161,24 +161,20 @@ softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 entropy_avail 1024 2048 ` -const entropyStatFile_Full = `1024` +const entropyStatFileFull = `1024` -const entropyStatFile_Partial = `1024` +const entropyStatFilePartial = `1024` -const entropyStatFile_Invalid = `` +const entropyStatFileInvalid = `` -func makeFakeStatFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_test") - if err != nil { - panic(err) - } +func makeFakeStatFile(t *testing.T, content []byte) string { + tmpfile, err := os.CreateTemp("", "kernel_test") + require.NoError(t, err) - if _, err := tmpfile.Write(content); err != nil { - panic(err) - } - if err := tmpfile.Close(); err != nil { - panic(err) - } + _, err = tmpfile.Write(content) + require.NoError(t, err) + + require.NoError(t, tmpfile.Close()) return tmpfile.Name() } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index ffc56d97d154e..95a7a5e32f1e0 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel_vmstat @@ -5,7 +6,6 @@ package kernel_vmstat import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" @@ -35,7 +35,6 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { dataFields := bytes.Fields(data) for i, field := range dataFields { - // dataFields 
is an array of {"stat1_name", "stat1_value", "stat2_name", // "stat2_value", ...} // We only want the even number index as that contain the stat name. @@ -46,7 +45,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { return err } - fields[string(field)] = int64(m) + fields[string(field)] = m } } @@ -56,12 +55,12 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { func (k *KernelVmstat) getProcVmstat() ([]byte, error) { if _, err := os.Stat(k.statFile); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel_vmstat: %s does not exist!", k.statFile) + return nil, fmt.Errorf("kernel_vmstat: %s does not exist", k.statFile) } else if err != nil { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go index 11a5d2e553dff..d687b13a9e72d 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel_vmstat diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index bba615a743e54..6590e3febd19c 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -1,19 +1,19 @@ +//go:build linux // +build linux package kernel_vmstat import ( - "io/ioutil" "os" "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFullVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Full)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileFull)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -21,8 +21,7 @@ func TestFullVmStatProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "nr_free_pages": int64(78730), @@ -121,7 +120,7 @@ func TestFullVmStatProcFile(t *testing.T) { } func TestPartialVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Partial)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFilePartial)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -130,7 +129,7 @@ func TestPartialVmStatProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "unevictable_pgs_culled": int64(1531), @@ -151,7 +150,7 @@ func TestPartialVmStatProcFile(t *testing.T) { } func TestInvalidVmStatProcFile1(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileInvalid)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -160,12 +159,13 @@ func TestInvalidVmStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") } func TestNoVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) - os.Remove(tmpfile) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileInvalid)) + require.NoError(t, os.Remove(tmpfile)) k := KernelVmstat{ statFile: tmpfile, @@ -173,11 +173,11 @@ func TestNoVmStatProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - 
assert.Error(t, err)
-	assert.Contains(t, err.Error(), "does not exist")
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "does not exist")
 }
 
-const vmStatFile_Full = `nr_free_pages 78730
+const vmStatFileFull = `nr_free_pages 78730
 nr_inactive_anon 426259
 nr_active_anon 2515657
 nr_inactive_file 2366791
@@ -269,7 +269,7 @@ thp_collapse_alloc 24857
 thp_collapse_alloc_failed 102214
 thp_split 9817`
 
-const vmStatFile_Partial = `unevictable_pgs_culled 1531
+const vmStatFilePartial = `unevictable_pgs_culled 1531
 unevictable_pgs_scanned 0
 unevictable_pgs_rescued 5426
 unevictable_pgs_mlocked 6988
@@ -284,7 +284,7 @@ thp_collapse_alloc_failed 102214
 thp_split 9817`
 
 // invalid thp_split measurement
-const vmStatFile_Invalid = `unevictable_pgs_culled 1531
+const vmStatFileInvalid = `unevictable_pgs_culled 1531
 unevictable_pgs_scanned 0
 unevictable_pgs_rescued 5426
 unevictable_pgs_mlocked 6988
@@ -298,18 +298,14 @@ thp_collapse_alloc 24857
 thp_collapse_alloc_failed 102214
 thp_split abcd`
 
-func makeFakeVmStatFile(content []byte) string {
-	tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test")
-	if err != nil {
-		panic(err)
-	}
+func makeFakeVMStatFile(t *testing.T, content []byte) string {
+	tmpfile, err := os.CreateTemp("", "kernel_vmstat_test")
+	require.NoError(t, err)
 
-	if _, err := tmpfile.Write(content); err != nil {
-		panic(err)
-	}
-	if err := tmpfile.Close(); err != nil {
-		panic(err)
-	}
+	_, err = tmpfile.Write(content)
+	require.NoError(t, err)
+
+	require.NoError(t, tmpfile.Close())
 
 	return tmpfile.Name()
 }
diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md
index 73bf4a2981d63..a5002d5f21204 100644
--- a/plugins/inputs/kibana/README.md
+++ b/plugins/inputs/kibana/README.md
@@ -53,3 +53,18 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status.
 ```
 kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000
 ```
+
+## Run example environment
+
+Requires the following tools:
+
+* [Docker](https://docs.docker.com/get-docker/)
+* [Docker Compose](https://docs.docker.com/compose/install/)
+
+From the root of this project, execute the following script: `./plugins/inputs/kibana/test_environment/run_test_env.sh`
+
+This will build the latest Telegraf and then start up Kibana and Elasticsearch. Telegraf will begin monitoring Kibana's status and writing its results to the file `/tmp/metrics.out` in the Telegraf container.
+
+You can then attach to the telegraf container and inspect the file `/tmp/metrics.out` to see whether the status is being reported.
+
+The Visual Studio Code [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension provides an easy user interface for attaching to the running container.
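An aside on the refactored temp-file helpers above (`makeFakeStatFile`/`makeFakeVMStatFile`): they still rely on every call site remembering `defer os.Remove(tmpfile)`. A minimal sketch of an alternative built on `testing.T.TempDir` (assuming Go 1.16+; the helper name is hypothetical and not part of this patch):

```go
package kernel

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// makeFakeStatFileAuto is a hypothetical variant of the helpers above.
// Creating the file inside t.TempDir() lets the test framework delete it
// automatically, so callers no longer need `defer os.Remove(tmpfile)`.
func makeFakeStatFileAuto(t *testing.T, content []byte) string {
	t.Helper()

	tmpfile, err := os.CreateTemp(t.TempDir(), "kernel_test")
	require.NoError(t, err)

	_, err = tmpfile.Write(content)
	require.NoError(t, err)
	require.NoError(t, tmpfile.Close())

	return tmpfile.Name()
}
```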
\ No newline at end of file diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 98b81a91f52b9..55ffa1df845f9 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -12,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -104,7 +103,7 @@ type Kibana struct { Servers []string Username string Password string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -112,7 +111,7 @@ type Kibana struct { func NewKibana() *Kibana { return &Kibana{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -141,7 +140,7 @@ func (k *Kibana) Description() string { func (k *Kibana) Gather(acc telegraf.Accumulator) error { if k.client == nil { - client, err := k.createHttpClient() + client, err := k.createHTTPClient() if err != nil { return err @@ -166,7 +165,7 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kibana) createHttpClient() (*http.Client, error) { +func (k *Kibana) createHTTPClient() (*http.Client, error) { tlsCfg, err := k.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -176,18 +175,17 @@ func (k *Kibana) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: k.Timeout.Duration, + Timeout: time.Duration(k.Timeout), } return client, nil } -func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) error { - +func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error { kibanaStatus := &kibanaStatus{} - url := baseUrl + statusPath + url := baseURL + statusPath - host, err := k.gatherJsonData(url, kibanaStatus) + host, err := k.gatherJSONData(url, kibanaStatus) if err != nil { return err } @@ -229,15 +227,13 @@ func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) er fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes - } - acc.AddFields("kibana", fields, tags) return nil } -func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err error) { +func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) { request, err := http.NewRequest("GET", url, nil) if err != nil { return "", fmt.Errorf("unable to create new request '%s': %v", url, err) @@ -256,7 +252,7 @@ func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err err if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
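The `io.LimitReader` pattern the comment above refers to is worth a closer look: it caps how much of an error response body can end up in the error message. A self-contained sketch of the same pattern (the URL is a placeholder, not the plugin's endpoint):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:5601/api/status") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Cap the body at 200 bytes so a large error page cannot balloon
		// the error message; the read error is deliberately ignored, since
		// io.ReadAll on a LimitReader simply stops once the cap is reached.
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
		fmt.Printf("request failed with status %s: %q\n", resp.Status, body)
		return
	}
	fmt.Println("status OK")
}
```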
-		body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200))
+		body, _ := io.ReadAll(io.LimitReader(response.Body, 200))
 		return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body)
 	}
 
diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go
index 3dfed9edfa9a2..565d9b1c79416 100644
--- a/plugins/inputs/kibana/kibana_test.go
+++ b/plugins/inputs/kibana/kibana_test.go
@@ -1,7 +1,7 @@
 package kibana
 
 import (
-	"io/ioutil"
+	"io"
 	"net/http"
 	"strings"
 	"testing"
@@ -46,7 +46,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
 		StatusCode: t.statusCode,
 	}
 	res.Header.Set("Content-Type", "application/json")
-	res.Body = ioutil.NopCloser(strings.NewReader(t.body))
+	res.Body = io.NopCloser(strings.NewReader(t.body))
 	return res, nil
 }
 
diff --git a/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf b/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf
new file mode 100644
index 0000000000000..c67f346b5c170
--- /dev/null
+++ b/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf
@@ -0,0 +1,75 @@
+# Telegraf Configuration for basic Kibana example
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will send metrics to outputs in batches of at most
+  ## metric_batch_size metrics.
+  ## This controls the size of writes that Telegraf sends to output plugins.
+  metric_batch_size = 1000
+
+  ## Maximum number of unwritten metrics per output. Increasing this value
+  ## allows for longer periods of output downtime without dropping metrics at the
+  ## cost of higher maximum memory usage.
+  metric_buffer_limit = 10000
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. Maximum flush_interval will be
+  ## flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## By default or when set to "0s", precision will be set to the same
+  ## timestamp order as the collection interval, with the maximum being 1s.
+  ## ie, when interval = "10s", precision will be "1s"
+  ## when interval = "250ms", precision will be "1ms"
+  ## Precision will NOT be used for service inputs. It is up to each individual
+  ## service input to set the timestamp at the appropriate precision.
+  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
+  precision = ""
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+  ## If set to true, do not set the "host" tag in the telegraf agent.
+ omit_hostname = false + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Send telegraf metrics to file(s) +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +# Read status information from one or more Kibana servers +[[inputs.kibana]] + ## Specify a list of one or more Kibana servers + servers = ["http://kib01:5601"] + + ## Timeout for HTTP requests + timeout = "5s" diff --git a/plugins/inputs/kibana/test_environment/docker-compose.yml b/plugins/inputs/kibana/test_environment/docker-compose.yml new file mode 100644 index 0000000000000..8aa6db00df009 --- /dev/null +++ b/plugins/inputs/kibana/test_environment/docker-compose.yml @@ -0,0 +1,48 @@ +## Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-dev-mode +version: '2.2' +services: + es01: + image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1 + container_name: es01 + environment: + - node.name=es01 + - cluster.name=es-docker-cluster + - cluster.initial_master_nodes=es01 + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + ulimits: + memlock: + soft: -1 + hard: -1 + volumes: + - data01:/usr/share/elasticsearch/data + ports: + - 9200:9200 + networks: + - elastic + + kib01: + image: docker.elastic.co/kibana/kibana:7.10.1 + container_name: kib01 + ports: + - 5601:5601 + environment: + ELASTICSEARCH_URL: http://es01:9200 + ELASTICSEARCH_HOSTS: http://es01:9200 + networks: + - elastic + + telegraf: + image: local_telegraf + volumes: + - ./basic_kibana_telegraf.conf:/etc/telegraf/telegraf.conf:ro + networks: + - elastic + +volumes: + data01: + driver: local + +networks: + elastic: + driver: bridge diff --git a/plugins/inputs/kibana/test_environment/run_test_env.sh b/plugins/inputs/kibana/test_environment/run_test_env.sh new file mode 100755 index 0000000000000..8ea741ac3f98e --- /dev/null +++ b/plugins/inputs/kibana/test_environment/run_test_env.sh @@ -0,0 +1,3 @@ +docker build -t local_telegraf -f scripts/alpine.docker . + +docker-compose -f plugins/inputs/kibana/test_environment/docker-compose.yml up diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md index 7896557ac6cf5..ba1a7580fd29b 100644 --- a/plugins/inputs/kinesis_consumer/README.md +++ b/plugins/inputs/kinesis_consumer/README.md @@ -13,16 +13,19 @@ and creates metrics using one of the supported [input data formats][]. 
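The `content_encoding` option documented below exists because CloudWatch Logs subscription data arrives on Kinesis gzip-compressed (the SDK already reverses the base64 layer, as this README notes). A minimal sketch of the decompression step, mirroring the `processGzip` helper added further down in this diff:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// decodeGzippedRecord mirrors the plugin's processGzip helper: wrap the raw
// record bytes in a gzip reader and return the decompressed payload.
func decodeGzippedRecord(data []byte) ([]byte, error) {
	zr, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return io.ReadAll(zr)
}

func main() {
	// Round-trip demo: locally compressed bytes stand in for a record payload.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte(`{"messageType":"DATA_MESSAGE"}`))
	_ = zw.Close()

	out, err := decodeGzippedRecord(buf.Bytes())
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(string(out)) // {"messageType":"DATA_MESSAGE"}
}
```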
## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile # access_key = "" # secret_key = "" # token = "" # role_arn = "" + # web_identity_token_file = "" + # role_session_name = "" # profile = "" # shared_credential_file = "" @@ -54,6 +57,15 @@ and creates metrics using one of the supported [input data formats][]. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## + ## The content encoding of the data from kinesis + ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" + ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws + ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding + ## is done automatically by the golang sdk, as data is read from kinesis) + ## + # content_encoding = "identity" + ## Optional ## Configuration for a dynamodb checkpoint [inputs.kinesis_consumer.checkpoint_dynamodb] diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 6a3b1c8301a48..4ff66ed1d2aaf 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -1,17 +1,21 @@ package kinesis_consumer import ( + "bytes" + "compress/gzip" + "compress/zlib" "context" "fmt" + "io" "math/big" "strings" "sync" "time" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/kinesis" consumer "github.com/harlow/kinesis-consumer" - "github.com/harlow/kinesis-consumer/checkpoint/ddb" + "github.com/harlow/kinesis-consumer/store/ddb" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -26,36 +30,32 @@ type ( } KinesisConsumer struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` StreamName string `toml:"streamname"` ShardIteratorType string `toml:"shard_iterator_type"` DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + ContentEncoding string `toml:"content_encoding"` Log telegraf.Logger cons *consumer.Consumer parser parsers.Parser cancel context.CancelFunc - ctx context.Context acc telegraf.TrackingAccumulator sem chan struct{} - checkpoint consumer.Checkpoint + checkpoint consumer.Store checkpoints map[string]checkpoint records map[telegraf.TrackingID]string checkpointTex sync.Mutex recordsTex sync.Mutex wg sync.WaitGroup + processContentEncodingFunc processContent + lastSeqNum *big.Int + + internalaws.CredentialConfig } checkpoint struct { @@ -68,6 +68,8 @@ const 
( defaultMaxUndeliveredMessages = 1000 ) +type processContent func([]byte) ([]byte, error) + // this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html var maxSeq = strToBint(strings.Repeat("9", 129)) @@ -77,16 +79,19 @@ var sampleConfig = ` ## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile # access_key = "" # secret_key = "" # token = "" # role_arn = "" + # web_identity_token_file = "" + # role_session_name = "" # profile = "" # shared_credential_file = "" @@ -118,6 +123,15 @@ var sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## + ## The content encoding of the data from kinesis + ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" + ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws + ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding + ## is done automatically by the golang sdk, as data is read from kinesis) + ## + # content_encoding = "identity" + ## Optional ## Configuration for a dynamodb checkpoint [inputs.kinesis_consumer.checkpoint_dynamodb] @@ -139,35 +153,19 @@ func (k *KinesisConsumer) SetParser(parser parsers.Parser) { } func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { - credentialConfig := &internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, - EndpointURL: k.EndpointURL, + cfg, err := k.CredentialConfig.Credentials() + if err != nil { + return err } - configProvider := credentialConfig.Credentials() - client := kinesis.New(configProvider) + client := kinesis.NewFromConfig(cfg) - k.checkpoint = &noopCheckpoint{} + k.checkpoint = &noopStore{} if k.DynamoDB != nil { var err error k.checkpoint, err = ddb.New( k.DynamoDB.AppName, k.DynamoDB.TableName, - ddb.WithDynamoClient(dynamodb.New((&internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, - EndpointURL: k.EndpointURL, - }).Credentials())), + ddb.WithDynamoClient(dynamodb.NewFromConfig(cfg)), ddb.WithMaxInterval(time.Second*10), ) if err != nil { @@ -179,7 +177,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.StreamName, consumer.WithClient(client), consumer.WithShardIteratorType(k.ShardIteratorType), - consumer.WithCheckpoint(k), + consumer.WithStore(k), ) if err != nil { return err @@ -204,20 +202,20 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.wg.Add(1) go func() { defer k.wg.Done() - err := k.cons.Scan(ctx, func(r *consumer.Record) consumer.ScanStatus { + err := k.cons.Scan(ctx, func(r *consumer.Record) error { select { case 
<-ctx.Done(): - return consumer.ScanStatus{Error: ctx.Err()} + return ctx.Err() case k.sem <- struct{}{}: break } err := k.onMessage(k.acc, r) if err != nil { - k.sem <- struct{}{} - return consumer.ScanStatus{Error: err} + <-k.sem + k.Log.Errorf("Scan parser error: %s", err.Error()) } - return consumer.ScanStatus{} + return nil }) if err != nil { k.cancel() @@ -239,7 +237,11 @@ func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error { } func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error { - metrics, err := k.parser.Parse(r.Data) + data, err := k.processContentEncodingFunc(r.Data) + if err != nil { + return err + } + metrics, err := k.parser.Parse(data) if err != nil { return err } @@ -284,7 +286,9 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) { } k.lastSeqNum = strToBint(sequenceNum) - k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum) + if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil { + k.Log.Debug("Setting checkpoint failed: %v", err) + } } else { k.Log.Debug("Metric group failed to process") } @@ -316,13 +320,13 @@ func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error { return nil } -// Get wraps the checkpoint's Get function (called by consumer library) -func (k *KinesisConsumer) Get(streamName, shardID string) (string, error) { - return k.checkpoint.Get(streamName, shardID) +// Get wraps the checkpoint's GetCheckpoint function (called by consumer library) +func (k *KinesisConsumer) GetCheckpoint(streamName, shardID string) (string, error) { + return k.checkpoint.GetCheckpoint(streamName, shardID) } -// Set wraps the checkpoint's Set function (called by consumer library) -func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error { +// Set wraps the checkpoint's SetCheckpoint function (called by consumer library) +func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber string) error { if sequenceNumber == "" { return fmt.Errorf("sequence number should not be empty") } @@ -334,10 +338,50 @@ func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error return nil } -type noopCheckpoint struct{} +func processGzip(data []byte) ([]byte, error) { + zipData, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zipData.Close() + return io.ReadAll(zipData) +} + +func processZlib(data []byte) ([]byte, error) { + zlibData, err := zlib.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zlibData.Close() + return io.ReadAll(zlibData) +} + +func processNoOp(data []byte) ([]byte, error) { + return data, nil +} + +func (k *KinesisConsumer) configureProcessContentEncodingFunc() error { + switch k.ContentEncoding { + case "gzip": + k.processContentEncodingFunc = processGzip + case "zlib": + k.processContentEncodingFunc = processZlib + case "none", "identity", "": + k.processContentEncodingFunc = processNoOp + default: + return fmt.Errorf("unknown content encoding %q", k.ContentEncoding) + } + return nil +} + +func (k *KinesisConsumer) Init() error { + return k.configureProcessContentEncodingFunc() +} + +type noopStore struct{} -func (n noopCheckpoint) Set(string, string, string) error { return nil } -func (n noopCheckpoint) Get(string, string) (string, error) { return "", nil } +func (n noopStore) SetCheckpoint(string, string, string) error { return nil } +func (n noopStore) GetCheckpoint(string, string) (string, error) { return "", nil } func 
init() { negOne, _ = new(big.Int).SetString("-1", 10) @@ -347,6 +391,7 @@ func init() { ShardIteratorType: "TRIM_HORIZON", MaxUndeliveredMessages: defaultMaxUndeliveredMessages, lastSeqNum: maxSeq, + ContentEncoding: "identity", } }) } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go new file mode 100644 index 0000000000000..1e0d935e03cc6 --- /dev/null +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -0,0 +1,210 @@ +package kinesis_consumer + +import ( + "encoding/base64" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + consumer "github.com/harlow/kinesis-consumer" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/testutil" +) + +func TestKinesisConsumer_onMessage(t *testing.T) { + zlibBytpes, _ := base64.StdEncoding.DecodeString("eF5FjlFrgzAUhf9KuM+2aNB2zdsQ2xe3whQGW8qIeqdhaiSJK0P874u1Y4+Hc/jON0GHxoga858BgUF8fs5fzunHU5Jlj6cEPFDXHvXStGqsrsKWTapq44pW1SetxsF1a8qsRtGt0YyFKbUcrFT9UbYWtQH2frntkm/s7RInkNU6t9JpWNE5WBAFPo3CcHeg+9D703OziUOhCg6MQ/yakrspuZsyEjdYfsm+Jg2K1jZEfZLKQWUvFglylBobZXDLwSP8//EGpD4NNj7dUJpT6hQY3W33h/AhCt84zDBf5l/MDl08") + gzippedBytes, _ := base64.StdEncoding.DecodeString("H4sIAAFXNGAAA0WOUWuDMBSF/0q4z7Zo0HbN2xDbF7fCFAZbyoh6p2FqJIkrQ/zvi7Vjj4dz+M43QYfGiBrznwGBQXx+zl/O6cdTkmWPpwQ8UNce9dK0aqyuwpZNqmrjilbVJ63GwXVryqxG0a3RjIUptRysVP1Rtha1AfZ+ue2Sb+ztEieQ1Tq30mlY0TlYEAU+jcJwd6D70PvTc7OJQ6EKDoxD/JqSuym5mzISN1h+yb4mDYrWNkR9kspBZS8WCXKUGhtlcMvBI/z/8QakPg02Pt1QmlPqFBjdbfeH8CEK3zjMMF/mX0TaxZUpAQAA") + notZippedBytes := []byte(`{"messageType":"CONTROL_MESSAGE","owner":"CloudwatchLogs","logGroup":"","logStream":"", +"subscriptionFilters":[],"logEvents":[ + {"id":"","timestamp":1510254469274,"message":"{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"},"}, + {"id":"","timestamp":1510254469274,"message":"{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}"} +]}`) + parser, _ := json.New(&json.Config{ + MetricName: "json_test", + Query: "logEvents", + StringFields: []string{"message"}, + }) + + type fields struct { + ContentEncoding string + parser parsers.Parser + records map[telegraf.TrackingID]string + } + type args struct { + r *consumer.Record + } + type expected struct { + numberOfMetrics int + messageContains string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + expected expected + }{ + { + name: "test no compression", + fields: fields{ + ContentEncoding: "none", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via empty string for ContentEncoding", + fields: fields{ + ContentEncoding: "", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { 
+			name: "test no compression via identity ContentEncoding",
+			fields: fields{
+				ContentEncoding: "identity",
+				parser:          parser,
+				records:         make(map[telegraf.TrackingID]string),
+			},
+			args: args{
+				r: &consumer.Record{
+					Record: types.Record{
+						Data:           notZippedBytes,
+						SequenceNumber: aws.String("anything"),
+					},
+				},
+			},
+			wantErr: false,
+			expected: expected{
+				messageContains: "bob",
+				numberOfMetrics: 2,
+			},
+		},
+		{
+			name: "test no compression via no ContentEncoding",
+			fields: fields{
+				parser:  parser,
+				records: make(map[telegraf.TrackingID]string),
+			},
+			args: args{
+				r: &consumer.Record{
+					Record: types.Record{
+						Data:           notZippedBytes,
+						SequenceNumber: aws.String("anything"),
+					},
+				},
+			},
+			wantErr: false,
+			expected: expected{
+				messageContains: "bob",
+				numberOfMetrics: 2,
+			},
+		},
+		{
+			name: "test gzip compression",
+			fields: fields{
+				ContentEncoding: "gzip",
+				parser:          parser,
+				records:         make(map[telegraf.TrackingID]string),
+			},
+			args: args{
+				r: &consumer.Record{
+					Record: types.Record{
+						Data:           gzippedBytes,
+						SequenceNumber: aws.String("anything"),
+					},
+				},
+			},
+			wantErr: false,
+			expected: expected{
+				messageContains: "bob",
+				numberOfMetrics: 1,
+			},
+		},
+		{
+			name: "test zlib compression",
+			fields: fields{
+				ContentEncoding: "zlib",
+				parser:          parser,
+				records:         make(map[telegraf.TrackingID]string),
+			},
+			args: args{
+				r: &consumer.Record{
+					Record: types.Record{
+						Data:           zlibBytpes,
+						SequenceNumber: aws.String("anything"),
+					},
+				},
+			},
+			wantErr: false,
+			expected: expected{
+				messageContains: "bob",
+				numberOfMetrics: 1,
+			},
+		},
+	}
+
+	k := &KinesisConsumer{
+		ContentEncoding: "notsupported",
+	}
+	err := k.Init()
+	require.NotNil(t, err)
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			k := &KinesisConsumer{
+				ContentEncoding: tt.fields.ContentEncoding,
+				parser:          tt.fields.parser,
+				records:         tt.fields.records,
+			}
+			err := k.Init()
+			require.Nil(t, err)
+
+			acc := testutil.Accumulator{}
+			if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr {
+				t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr)
+			}
+
+			require.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics))
+
+			for _, metric := range acc.Metrics {
+				if logEventMessage, ok := metric.Fields["message"]; ok {
+					require.Contains(t, logEventMessage.(string), tt.expected.messageContains)
+				} else {
+					t.Errorf("Expect logEvents to be present")
+				}
+			}
+		})
+	}
+}
diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md
new file mode 100644
index 0000000000000..518dd5d7f3720
--- /dev/null
+++ b/plugins/inputs/knx_listener/README.md
@@ -0,0 +1,66 @@
+# KNX Input Plugin
+
+The KNX input plugin listens for messages on the KNX home-automation bus.
+This plugin connects to the KNX bus via a KNX-IP interface.
+Information about supported KNX message datapoint types can be found at the
+underlying "knx-go" project site (https://github.com/vapourismo/knx-go).
+
+### Configuration
+
+This is a sample config for the plugin.
+
+```toml
+# Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
+[[inputs.knx_listener]]
+  ## Type of KNX-IP interface.
+  ## Can be either "tunnel" or "router".
+  # service_type = "tunnel"
+
+  ## Address of the KNX-IP interface.
+  service_address = "localhost:3671"
+
+  ## Measurement definition(s)
+  # [[inputs.knx_listener.measurement]]
+  #   ## Name of the measurement
+  #   name = "temperature"
+  #   ## Datapoint-Type (DPT) of the KNX messages
+  #   dpt = "9.001"
+  #   ## List of Group-Addresses (GAs) assigned to the measurement
+  #   addresses = ["5/5/1"]
+
+  # [[inputs.knx_listener.measurement]]
+  #   name = "illumination"
+  #   dpt = "9.004"
+  #   addresses = ["5/5/3"]
+```
+
+#### Measurement configurations
+
+Each measurement contains only one datapoint-type (DPT) and assigns a list of
+addresses to it. You can, for example, group all temperature sensor
+messages within a "temperature" measurement. However, you are free to split
+messages of one datapoint-type across multiple measurements.
+
+**NOTE: You should not assign a group-address (GA) to multiple measurements!**
+
+### Metrics
+
+Received KNX data is stored in the named measurement as configured above using
+the "value" field. In addition to the value, the following tags are added
+to the datapoint:
+  - "groupaddress": KNX group-address corresponding to the value
+  - "unit": unit of the value
+  - "source": KNX physical address sending the value
+
+To find out about the datatype of the datapoint, please check your KNX project,
+the KNX-specification or the "knx-go" project for the corresponding DPT.
+
+### Example Output
+
+This section shows example output in Line Protocol format.
+
+```
+illumination,groupaddress=5/5/4,host=Hugin,source=1.1.12,unit=lux value=17.889999389648438 1582132674999013274
+temperature,groupaddress=5/5/1,host=Hugin,source=1.1.8,unit=°C value=17.799999237060547 1582132663427587361
+windowopen,groupaddress=1/0/1,host=Hugin,source=1.1.3 value=true 1582132630425581320
+```
diff --git a/plugins/inputs/knx_listener/knx_dummy_interface.go b/plugins/inputs/knx_listener/knx_dummy_interface.go
new file mode 100644
index 0000000000000..1f897c4d99baa
--- /dev/null
+++ b/plugins/inputs/knx_listener/knx_dummy_interface.go
@@ -0,0 +1,28 @@
+package knx_listener
+
+import (
+	"github.com/vapourismo/knx-go/knx"
+)
+
+type KNXDummyInterface struct {
+	inbound chan knx.GroupEvent
+}
+
+func NewDummyInterface() (di KNXDummyInterface, err error) {
+	di, err = KNXDummyInterface{}, nil
+	di.inbound = make(chan knx.GroupEvent)
+
+	return di, err
+}
+
+func (di *KNXDummyInterface) Send(event knx.GroupEvent) {
+	di.inbound <- event
+}
+
+func (di *KNXDummyInterface) Inbound() <-chan knx.GroupEvent {
+	return di.inbound
+}
+
+func (di *KNXDummyInterface) Close() {
+	close(di.inbound)
+}
diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go
new file mode 100644
index 0000000000000..3896d649b4055
--- /dev/null
+++ b/plugins/inputs/knx_listener/knx_listener.go
@@ -0,0 +1,201 @@
+package knx_listener
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/vapourismo/knx-go/knx"
+	"github.com/vapourismo/knx-go/knx/dpt"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type KNXInterface interface {
+	Inbound() <-chan knx.GroupEvent
+	Close()
+}
+
+type addressTarget struct {
+	measurement string
+	datapoint   dpt.DatapointValue
+}
+
+type Measurement struct {
+	Name      string
+	Dpt       string
+	Addresses []string
+}
+
+type KNXListener struct {
+	ServiceType    string          `toml:"service_type"`
+	ServiceAddress string          `toml:"service_address"`
+	Measurements   []Measurement   `toml:"measurement"`
+	Log            telegraf.Logger `toml:"-"`
+
+	client      KNXInterface
+	gaTargetMap map[string]addressTarget
+	gaLogbook   map[string]bool
+
+	acc telegraf.Accumulator
+	wg  sync.WaitGroup
+}
+
+func (kl *KNXListener) Description() string {
+	return "Listener capable of handling KNX bus messages provided through a KNX-IP Interface."
+}
+
+func (kl *KNXListener) SampleConfig() string {
+	return `
+  ## Type of KNX-IP interface.
+  ## Can be either "tunnel" or "router".
+  # service_type = "tunnel"
+
+  ## Address of the KNX-IP interface.
+  service_address = "localhost:3671"
+
+  ## Measurement definition(s)
+  # [[inputs.knx_listener.measurement]]
+  #   ## Name of the measurement
+  #   name = "temperature"
+  #   ## Datapoint-Type (DPT) of the KNX messages
+  #   dpt = "9.001"
+  #   ## List of Group-Addresses (GAs) assigned to the measurement
+  #   addresses = ["5/5/1"]
+
+  # [[inputs.knx_listener.measurement]]
+  #   name = "illumination"
+  #   dpt = "9.004"
+  #   addresses = ["5/5/3"]
+`
+}
+
+func (kl *KNXListener) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (kl *KNXListener) Start(acc telegraf.Accumulator) error {
+	// Store the accumulator for later use
+	kl.acc = acc
+
+	// Setup a logbook to track unknown GAs to avoid log-spamming
+	kl.gaLogbook = make(map[string]bool)
+
+	// Construct the mapping of Group-addresses (GAs) to DPTs and the name
+	// of the measurement
+	kl.gaTargetMap = make(map[string]addressTarget)
+	for _, m := range kl.Measurements {
+		kl.Log.Debugf("Group-address mapping for measurement %q:", m.Name)
+		for _, ga := range m.Addresses {
+			kl.Log.Debugf("  %s --> %s", ga, m.Dpt)
+			if _, ok := kl.gaTargetMap[ga]; ok {
+				return fmt.Errorf("duplicate specification of address %q", ga)
+			}
+			d, ok := dpt.Produce(m.Dpt)
+			if !ok {
+				return fmt.Errorf("cannot create datapoint-type %q for address %q", m.Dpt, ga)
+			}
+			kl.gaTargetMap[ga] = addressTarget{m.Name, d}
+		}
+	}
+
+	// Connect to the KNX-IP interface
+	kl.Log.Infof("Trying to connect to %q at %q", kl.ServiceType, kl.ServiceAddress)
+	switch kl.ServiceType {
+	case "tunnel":
+		c, err := knx.NewGroupTunnel(kl.ServiceAddress, knx.DefaultTunnelConfig)
+		if err != nil {
+			return err
+		}
+		kl.client = &c
+	case "router":
+		c, err := knx.NewGroupRouter(kl.ServiceAddress, knx.DefaultRouterConfig)
+		if err != nil {
+			return err
+		}
+		kl.client = &c
+	case "dummy":
+		c, err := NewDummyInterface()
+		if err != nil {
+			return err
+		}
+		kl.client = &c
+	default:
+		return fmt.Errorf("invalid interface type: %s", kl.ServiceType)
+	}
+	kl.Log.Infof("Connected!")
+
+	// Listen to the KNX bus
+	kl.wg.Add(1)
+	go func() {
+		defer kl.wg.Done()
+		kl.listen()
+	}()
+
+	return nil
+}
+
+func (kl *KNXListener) Stop() {
+	if kl.client != nil {
+		kl.client.Close()
+		kl.wg.Wait()
+	}
+}
+
+func (kl *KNXListener) listen() {
+	for msg := range kl.client.Inbound() {
+		// Match GA to DataPointType and measurement name
+		ga := msg.Destination.String()
+		target, ok := kl.gaTargetMap[ga]
+		if !ok {
+			if !kl.gaLogbook[ga] {
+				kl.Log.Infof("Ignoring message %+v for unknown GA %q", msg, ga)
+				kl.gaLogbook[ga] = true
+			}
+			continue
+		}
+
+		// Extract the value from the data-frame
+		err := target.datapoint.Unpack(msg.Data)
+		if err != nil {
+			kl.Log.Errorf("Unpacking data failed: %v", err)
+			continue
+		}
+		kl.Log.Debugf("Matched GA %q to measurement %q with value %v", ga, target.measurement, target.datapoint)
+
+		// Convert the DatapointValue interface back to its basic type again
+		// as otherwise telegraf will not push out the metrics and eat it
+		// silently.
+ var value interface{} + vi := reflect.Indirect(reflect.ValueOf(target.datapoint)) + switch vi.Kind() { + case reflect.Bool: + value = vi.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value = vi.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value = vi.Uint() + case reflect.Float32, reflect.Float64: + value = vi.Float() + default: + kl.Log.Errorf("Type conversion %v failed for address %q", vi.Kind(), ga) + continue + } + + // Compose the actual data to be pushed out + fields := map[string]interface{}{"value": value} + tags := map[string]string{ + "groupaddress": ga, + "unit": target.datapoint.(dpt.DatapointMeta).Unit(), + "source": msg.Source.String(), + } + kl.acc.AddFields(target.measurement, fields, tags) + } +} + +func init() { + inputs.Add("knx_listener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) + // Register for backward compatibility + inputs.Add("KNXListener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) +} diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go new file mode 100644 index 0000000000000..adb07eb6d0113 --- /dev/null +++ b/plugins/inputs/knx_listener/knx_listener_test.go @@ -0,0 +1,188 @@ +package knx_listener + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/vapourismo/knx-go/knx" + "github.com/vapourismo/knx-go/knx/cemi" + "github.com/vapourismo/knx-go/knx/dpt" + + "github.com/influxdata/telegraf/testutil" +) + +const epsilon = 1e-3 + +func setValue(data dpt.DatapointValue, value interface{}) error { + d := reflect.Indirect(reflect.ValueOf(data)) + if !d.CanSet() { + return fmt.Errorf("cannot set datapoint %v", data) + } + switch v := value.(type) { + case bool: + d.SetBool(v) + case float64: + d.SetFloat(v) + case int64: + d.SetInt(v) + case uint64: + d.SetUint(v) + default: + return fmt.Errorf("unknown type '%T' when setting value for DPT", value) + } + return nil +} + +type TestMessage struct { + address string + dpt string + value interface{} +} + +func ProduceKnxEvent(t *testing.T, address string, datapoint string, value interface{}) *knx.GroupEvent { + addr, err := cemi.NewGroupAddrString(address) + require.NoError(t, err) + + data, ok := dpt.Produce(datapoint) + require.True(t, ok) + err = setValue(data, value) + require.NoError(t, err) + + return &knx.GroupEvent{ + Command: knx.GroupWrite, + Destination: addr, + Data: data.Pack(), + } +} + +func TestRegularReceives_DPT(t *testing.T) { + // Define the test-cases + var testcases = []TestMessage{ + {"1/0/1", "1.001", true}, + {"1/0/2", "1.002", false}, + {"1/0/3", "1.003", true}, + {"1/0/9", "1.009", false}, + {"1/1/0", "1.010", true}, + {"5/0/1", "5.001", 12.157}, + {"5/0/3", "5.003", 121.412}, + {"5/0/4", "5.004", uint64(25)}, + {"9/0/1", "9.001", 18.56}, + {"9/0/4", "9.004", 243.84}, + {"9/0/5", "9.005", 12.01}, + {"9/0/7", "9.007", 59.32}, + {"13/0/1", "13.001", int64(-15)}, + {"13/0/2", "13.002", int64(183)}, + {"13/1/0", "13.010", int64(-141)}, + {"13/1/1", "13.011", int64(277)}, + {"13/1/2", "13.012", int64(-4096)}, + {"13/1/3", "13.013", int64(8192)}, + {"13/1/4", "13.014", int64(-65536)}, + {"13/1/5", "13.015", int64(2147483647)}, + {"14/0/0", "14.000", -1.31}, + {"14/0/1", "14.001", 0.44}, + {"14/0/2", "14.002", 32.08}, + // {"14/0/3", "14.003", 92.69}, + // {"14/0/4", "14.004", 1.00794}, + {"14/1/0", "14.010", 5963.78}, + {"14/1/1", "14.011", 
150.95}, + } + acc := &testutil.Accumulator{} + + // Setup the unit-under-test + measurements := make([]Measurement, 0, len(testcases)) + for _, testcase := range testcases { + measurements = append(measurements, Measurement{"test", testcase.dpt, []string{testcase.address}}) + } + listener := KNXListener{ + ServiceType: "dummy", + Measurements: measurements, + Log: testutil.Logger{Name: "knx_listener"}, + } + + // Setup the listener to test + err := listener.Start(acc) + require.NoError(t, err) + client := listener.client.(*KNXDummyInterface) + + tstart := time.Now() + + // Send the defined test data + for _, testcase := range testcases { + event := ProduceKnxEvent(t, testcase.address, testcase.dpt, testcase.value) + client.Send(*event) + } + + // Give the accumulator some time to collect the data + acc.Wait(len(testcases)) + + // Stop the listener + listener.Stop() + tstop := time.Now() + + // Check if we got what we expected + require.Len(t, acc.Metrics, len(testcases)) + for i, m := range acc.Metrics { + require.Equal(t, "test", m.Measurement) + require.Equal(t, testcases[i].address, m.Tags["groupaddress"]) + require.Len(t, m.Fields, 1) + switch v := testcases[i].value.(type) { + case bool, int64, uint64: + require.Equal(t, v, m.Fields["value"]) + case float64: + require.InDelta(t, v, m.Fields["value"], epsilon) + } + require.True(t, !tstop.Before(m.Time)) + require.True(t, !tstart.After(m.Time)) + } +} + +func TestRegularReceives_MultipleMessages(t *testing.T) { + listener := KNXListener{ + ServiceType: "dummy", + Measurements: []Measurement{ + {"temperature", "1.001", []string{"1/1/1"}}, + }, + Log: testutil.Logger{Name: "knx_listener"}, + } + + acc := &testutil.Accumulator{} + + // Setup the listener to test + err := listener.Start(acc) + require.NoError(t, err) + client := listener.client.(*KNXDummyInterface) + + testMessages := []TestMessage{ + {"1/1/1", "1.001", true}, + {"1/1/1", "1.001", false}, + {"1/1/2", "1.001", false}, + {"1/1/2", "1.001", true}, + } + + for _, testcase := range testMessages { + event := ProduceKnxEvent(t, testcase.address, testcase.dpt, testcase.value) + client.Send(*event) + } + + // Give the accumulator some time to collect the data + acc.Wait(2) + + // Stop the listener + listener.Stop() + + // Check if we got what we expected + require.Len(t, acc.Metrics, 2) + + require.Equal(t, "temperature", acc.Metrics[0].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) + require.Len(t, acc.Metrics[0].Fields, 1) + require.Equal(t, true, acc.Metrics[0].Fields["value"]) + + require.Equal(t, "temperature", acc.Metrics[1].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) + require.Len(t, acc.Metrics[1].Fields, 1) + require.Equal(t, false, acc.Metrics[1].Fields["value"]) +} diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index dbed6d6f01edb..7803d4fc4e9eb 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -27,8 +27,6 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. 
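Stepping back to the KNX listener for a moment: its `listen` loop unwraps each `dpt.DatapointValue` into a plain Go value via reflection before handing it to the accumulator. A standalone illustration of that kind-switch technique (plain Go with an invented sample type, no knx-go dependency):

```go
package main

import (
	"fmt"
	"reflect"
)

// ValueTemp stands in for a knx-go datapoint type: a named type whose
// underlying kind is one of the basics the listener handles.
type ValueTemp float64

// unwrap reduces a pointer to a named basic type down to bool/int64/
// uint64/float64, exactly like the switch in the listener's listen loop.
func unwrap(v interface{}) (interface{}, error) {
	vi := reflect.Indirect(reflect.ValueOf(v))
	switch vi.Kind() {
	case reflect.Bool:
		return vi.Bool(), nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return vi.Int(), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return vi.Uint(), nil
	case reflect.Float32, reflect.Float64:
		return vi.Float(), nil
	default:
		return nil, fmt.Errorf("unsupported kind %v", vi.Kind())
	}
}

func main() {
	dp := ValueTemp(17.8)
	v, err := unwrap(&dp) // the listener holds pointers to datapoint values
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%v (%T)\n", v, v) // 17.8 (float64)
}
```

Without this reduction the accumulator would receive the named datapoint type rather than a basic type, which is the silent-drop case the code comment warns about.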
@@ -38,7 +36,7 @@ avoid cardinality issues:
 ```toml
 [[inputs.kube_inventory]]
   ## URL for the Kubernetes API
-  url = "https://127.0.0.1"
+  url = "https://$HOSTIP:6443"
 
   ## Namespace to use. Set to "" to use all namespaces.
   # namespace = "default"
@@ -70,8 +68,11 @@ avoid cardinality issues:
   selector_exclude = ["*"]
 
   ## Optional TLS Config
+  ## Trusted root certificates for server
   # tls_ca = "/path/to/cafile"
+  ## Used for TLS client certificate authentication
   # tls_cert = "/path/to/certfile"
+  ## Used for TLS client certificate authentication
   # tls_key = "/path/to/keyfile"
   ## Use TLS but skip chain & host verification
   # insecure_skip_verify = false
@@ -129,6 +130,26 @@ subjects:
     namespace: default
 ```
 
+## Quickstart in k3s
+
+When monitoring [k3s](https://k3s.io) server instances, one can re-use the already generated administration token.
+This is less secure than using the more restrictive dedicated telegraf user but more convenient to set up.
+
+```console
+# an empty token will make telegraf use the client cert/key files instead
+$ touch /run/telegraf-kubernetes-token
+# replace `telegraf` with the user the telegraf process is running as
+$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.crt /run/telegraf-kubernetes-cert
+$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.key /run/telegraf-kubernetes-key
+```
+
+```toml
+[kube_inventory]
+bearer_token = "/run/telegraf-kubernetes-token"
+tls_cert = "/run/telegraf-kubernetes-cert"
+tls_key = "/run/telegraf-kubernetes-key"
+```
+
 ### Metrics:
 
 - kubernetes_daemonset
@@ -191,9 +212,11 @@ subjects:
     - node_name
   - fields:
     - capacity_cpu_cores
+    - capacity_millicpu_cores
     - capacity_memory_bytes
    - capacity_pods
    - allocatable_cpu_cores
+    - allocatable_millicpu_cores
     - allocatable_memory_bytes
     - allocatable_pods
 
@@ -222,16 +245,18 @@ subjects:
     - node_name
     - pod_name
     - node_selector (\*varies)
+    - phase
     - state
     - readiness
   - fields:
     - restarts_total
     - state_code
     - state_reason
+    - phase_reason
     - terminated_reason (string, deprecated in 1.15: use `state_reason` instead)
-    - resource_requests_cpu_units
+    - resource_requests_millicpu_units
     - resource_requests_memory_bytes
-    - resource_limits_cpu_units
+    - resource_limits_millicpu_units
     - resource_limits_memory_bytes
 
 - kubernetes_service
@@ -299,14 +324,12 @@ kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-2
 kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
 kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000
 kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000
-kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
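The `client.go` rewrite further down in this diff replaces `ericchiang/k8s` with the official `k8s.io/client-go`, and every resource getter then follows the same clientset pattern. A minimal sketch of that API shape (host and token are placeholders):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Build a clientset straight from a rest.Config, as the new newClient does.
	cfg := &rest.Config{
		Host:            "https://127.0.0.1:6443", // placeholder API server URL
		BearerToken:     "abc123",                 // placeholder token
		TLSClientConfig: rest.TLSClientConfig{Insecure: true},
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Typed group client -> namespaced resource -> List with ListOptions:
	// the pattern behind getPods, getNodes, getServices, and the rest.
	pods, err := clientset.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods:", len(pods.Items))
}
```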
+kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,phase=Running,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",phase_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 ``` [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index d9b24ba5c0a95..66455b004f918 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -4,10 +4,12 @@ import ( "context" "time" - "github.com/ericchiang/k8s" - v1APPS "github.com/ericchiang/k8s/apis/apps/v1" - v1 "github.com/ericchiang/k8s/apis/core/v1" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -15,104 +17,89 @@ import ( type client struct { namespace string timeout time.Duration - *k8s.Client + *kubernetes.Clientset } func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) { - c, err := k8s.NewClient(&k8s.Config{ - Clusters: []k8s.NamedCluster{{Name: "cluster", Cluster: k8s.Cluster{ - Server: baseURL, - InsecureSkipTLSVerify: tlsConfig.InsecureSkipVerify, - CertificateAuthority: tlsConfig.TLSCA, - }}}, - Contexts: []k8s.NamedContext{{Name: "context", Context: k8s.Context{ - Cluster: "cluster", - AuthInfo: "auth", - Namespace: namespace, - }}}, - AuthInfos: []k8s.NamedAuthInfo{{Name: "auth", AuthInfo: k8s.AuthInfo{ - Token: bearerToken, - ClientCertificate: tlsConfig.TLSCert, - ClientKey: tlsConfig.TLSKey, - }}}, + c, err := kubernetes.NewForConfig(&rest.Config{ + TLSClientConfig: rest.TLSClientConfig{ + ServerName: baseURL, + Insecure: tlsConfig.InsecureSkipVerify, + CAFile: tlsConfig.TLSCA, + CertFile: tlsConfig.TLSCert, + KeyFile: tlsConfig.TLSKey, + }, + Host: baseURL, + BearerToken: bearerToken, + ContentConfig: rest.ContentConfig{}, }) if err != nil { return nil, err } return &client{ - Client: c, + Clientset: c, timeout: timeout, namespace: namespace, }, nil } -func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) { - list := new(v1APPS.DaemonSetList) +func (c *client) getDaemonSets(ctx context.Context) 
(*appsv1.DaemonSetList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().DaemonSets(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) { - list := &v1APPS.DeploymentList{} +func (c *client) getDeployments(ctx context.Context) (*appsv1.DeploymentList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().Deployments(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getEndpoints(ctx context.Context) (*v1.EndpointsList, error) { - list := new(v1.EndpointsList) +func (c *client) getEndpoints(ctx context.Context) (*corev1.EndpointsList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Endpoints(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getIngress(ctx context.Context) (*v1beta1EXT.IngressList, error) { - list := new(v1beta1EXT.IngressList) +func (c *client) getIngress(ctx context.Context) (*netv1.IngressList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.NetworkingV1().Ingresses(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getNodes(ctx context.Context) (*v1.NodeList, error) { - list := new(v1.NodeList) +func (c *client) getNodes(ctx context.Context) (*corev1.NodeList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, "", list) + return c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) } -func (c *client) getPersistentVolumes(ctx context.Context) (*v1.PersistentVolumeList, error) { - list := new(v1.PersistentVolumeList) +func (c *client) getPersistentVolumes(ctx context.Context) (*corev1.PersistentVolumeList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, "", list) + return c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) } -func (c *client) getPersistentVolumeClaims(ctx context.Context) (*v1.PersistentVolumeClaimList, error) { - list := new(v1.PersistentVolumeClaimList) +func (c *client) getPersistentVolumeClaims(ctx context.Context) (*corev1.PersistentVolumeClaimList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().PersistentVolumeClaims(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getPods(ctx context.Context) (*v1.PodList, error) { - list := new(v1.PodList) +func (c *client) getPods(ctx context.Context) (*corev1.PodList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) { - list := new(v1.ServiceList) +func (c *client) getServices(ctx context.Context) (*corev1.ServiceList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Services(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) { - list := new(v1APPS.StatefulSetList) +func (c *client) getStatefulSets(ctx context.Context) 
(*appsv1.StatefulSetList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().StatefulSets(c.namespace).List(ctx, metav1.ListOptions{}) } diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go index 88411ea367ccf..0462c0222d527 100644 --- a/plugins/inputs/kube_inventory/client_test.go +++ b/plugins/inputs/kube_inventory/client_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/util/intstr" "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/stretchr/testify/require" ) type mockHandler struct { @@ -20,24 +20,11 @@ func toInt32Ptr(i int32) *int32 { return &i } -func toInt64Ptr(i int64) *int64 { - return &i -} - func toBoolPtr(b bool) *bool { return &b } -func toIntStrPtrS(s string) *intstr.IntOrString { - return &intstr.IntOrString{StrVal: &s} -} - -func toIntStrPtrI(i int32) *intstr.IntOrString { - return &intstr.IntOrString{IntVal: &i} -} func TestNewClient(t *testing.T) { _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) - if err != nil { - t.Errorf("Failed to create new client - %s", err.Error()) - } + require.NoErrorf(t, err, "Failed to create new client - %v", err) } diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index db612a5e33b2a..e169c8f274662 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/apps/v1" + v1 "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -16,39 +15,35 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern return } for _, d := range list.Items { - if err = ki.gatherDaemonSet(*d, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherDaemonSet(d, acc) } } -func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) { fields := map[string]interface{}{ - "generation": d.Metadata.GetGeneration(), - "current_number_scheduled": d.Status.GetCurrentNumberScheduled(), - "desired_number_scheduled": d.Status.GetDesiredNumberScheduled(), - "number_available": d.Status.GetNumberAvailable(), - "number_misscheduled": d.Status.GetNumberMisscheduled(), - "number_ready": d.Status.GetNumberReady(), - "number_unavailable": d.Status.GetNumberUnavailable(), - "updated_number_scheduled": d.Status.GetUpdatedNumberScheduled(), + "generation": d.Generation, + "current_number_scheduled": d.Status.CurrentNumberScheduled, + "desired_number_scheduled": d.Status.DesiredNumberScheduled, + "number_available": d.Status.NumberAvailable, + "number_misscheduled": d.Status.NumberMisscheduled, + "number_ready": d.Status.NumberReady, + "number_unavailable": d.Status.NumberUnavailable, + "updated_number_scheduled": d.Status.UpdatedNumberScheduled, } tags := map[string]string{ - "daemonset_name": d.Metadata.GetName(), - "namespace": d.Metadata.GetNamespace(), + "daemonset_name": d.Name, + "namespace": d.Namespace, } - for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range d.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } - if d.Metadata.CreationTimestamp.GetSeconds() != 0 { - fields["created"] = 
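The old code detected a missing creation timestamp by inspecting the protobuf seconds field; with `metav1.Time`, which embeds `time.Time`, the idiomatic check is `IsZero()`, and `UnixNano()` comes for free. A focused sketch of the guard, with `fields` and `meta` standing in for the plugin's locals:

```go
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// addCreated records the creation timestamp only when the object
// actually carries one; a zero metav1.Time means "unset", not epoch.
func addCreated(fields map[string]interface{}, meta metav1.ObjectMeta) {
	ts := meta.GetCreationTimestamp()
	if !ts.IsZero() {
		fields["created"] = ts.UnixNano()
	}
}
```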
time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano() + creationTs := d.GetCreationTimestamp() + if !creationTs.IsZero() { + fields["created"] = d.GetCreationTimestamp().UnixNano() } acc.AddFields(daemonSetMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index 0a13f1e42cb3d..5c67f39432dae 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -1,15 +1,16 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestDaemonSet(t *testing.T) { @@ -21,7 +22,7 @@ func TestDaemonSet(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -38,28 +39,28 @@ func TestDaemonSet(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/daemonsets/": &v1.DaemonSetList{ - Items: []*v1.DaemonSet{ + Items: []v1.DaemonSet{ { - Status: &v1.DaemonSetStatus{ - CurrentNumberScheduled: toInt32Ptr(3), - DesiredNumberScheduled: toInt32Ptr(5), - NumberAvailable: toInt32Ptr(2), - NumberMisscheduled: toInt32Ptr(2), - NumberReady: toInt32Ptr(1), - NumberUnavailable: toInt32Ptr(1), - UpdatedNumberScheduled: toInt32Ptr(2), + Status: v1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 5, + NumberAvailable: 2, + NumberMisscheduled: 2, + NumberReady: 1, + NumberUnavailable: 1, + UpdatedNumberScheduled: 2, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("daemon1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "daemon1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, - Spec: &v1.DaemonSetSpec{ + Spec: v1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", @@ -72,28 +73,28 @@ func TestDaemonSet(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "generation": int64(11221), - "current_number_scheduled": int32(3), - "desired_number_scheduled": int32(5), - "number_available": int32(2), - "number_misscheduled": int32(2), - "number_ready": int32(1), - "number_unavailable": int32(1), - "updated_number_scheduled": int32(2), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "daemonset_name": "daemon1", - "namespace": "ns1", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_daemonset", + map[string]string{ + "daemonset_name": "daemon1", + "namespace": "ns1", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "generation": int64(11221), + "current_number_scheduled": int32(3), + "desired_number_scheduled": int32(5), + "number_available": int32(2), + "number_misscheduled": int32(2), + "number_ready": int32(1), + "number_unavailable": int32(1), + "updated_number_scheduled": int32(2), + "created": 
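The `selector_*` tags seen in the gather functions are gated by Telegraf's include/exclude glob filter, built once by `createSelectorFilters`. A small standalone sketch of that behavior; the patterns here are hypothetical, not the plugin's defaults:

```go
import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Include every selector key except those starting with "internal".
	f, err := filter.NewIncludeExcludeFilter([]string{"*"}, []string{"internal*"})
	if err != nil {
		panic(err)
	}
	for _, key := range []string{"select1", "internal-only"} {
		fmt.Println(key, f.Match(key)) // select1 true, internal-only false
	}
}
```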
now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -105,37 +106,23 @@ func TestDaemonSet(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(*dset, acc) - if err != nil { - t.Errorf("Failed to gather daemonset - %s", err.Error()) - } + ks.gatherDaemonSet(dset, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -146,28 +133,28 @@ func TestDaemonSetSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/daemonsets/": &v1.DaemonSetList{ - Items: []*v1.DaemonSet{ + Items: []v1.DaemonSet{ { - Status: &v1.DaemonSetStatus{ - CurrentNumberScheduled: toInt32Ptr(3), - DesiredNumberScheduled: toInt32Ptr(5), - NumberAvailable: toInt32Ptr(2), - NumberMisscheduled: toInt32Ptr(2), - NumberReady: toInt32Ptr(1), - NumberUnavailable: toInt32Ptr(1), - UpdatedNumberScheduled: toInt32Ptr(2), + Status: v1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 5, + NumberAvailable: 2, + NumberMisscheduled: 2, + NumberReady: 1, + NumberUnavailable: 1, + UpdatedNumberScheduled: 2, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("daemon1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "daemon1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: time.Now()}, }, - Spec: &v1.DaemonSetSpec{ + Spec: v1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", @@ -281,13 +268,10 @@ func TestDaemonSetSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(*dset, acc) - if err != nil { - t.Errorf("Failed to gather daemonset - %s", err.Error()) - } + ks.gatherDaemonSet(dset, acc) } // Grab selector tags @@ -300,8 +284,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - 
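The hand-rolled tag/field comparison loops are replaced throughout these tests by `testutil.MustMetric` for building expectations and `testutil.RequireMetricsEqual` for the comparison, with `testutil.IgnoreTime()` making the timestamp argument irrelevant. A self-contained sketch of the same assertion style:

```go
package kube_inventory

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

func TestAssertionStyle(t *testing.T) {
	acc := new(testutil.Accumulator)
	acc.AddFields("kubernetes_daemonset",
		map[string]interface{}{"number_ready": int32(1)},
		map[string]string{"daemonset_name": "daemon1"})

	expected := []telegraf.Metric{
		testutil.MustMetric(
			"kubernetes_daemonset",
			map[string]string{"daemonset_name": "daemon1"},
			map[string]interface{}{"number_ready": int32(1)},
			time.Unix(0, 0), // placeholder; IgnoreTime drops it from the comparison
		),
	}
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
```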
t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index b91216765e9a6..510cc68cecaa7 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -2,10 +2,9 @@ package kube_inventory import ( "context" - "time" - v1 "github.com/ericchiang/k8s/apis/apps/v1" "github.com/influxdata/telegraf" + v1 "k8s.io/api/apps/v1" ) func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { @@ -15,30 +14,25 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber return } for _, d := range list.Items { - if err = ki.gatherDeployment(*d, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherDeployment(d, acc) } } -func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) { fields := map[string]interface{}{ - "replicas_available": d.Status.GetAvailableReplicas(), - "replicas_unavailable": d.Status.GetUnavailableReplicas(), - "created": time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "replicas_available": d.Status.AvailableReplicas, + "replicas_unavailable": d.Status.UnavailableReplicas, + "created": d.GetCreationTimestamp().UnixNano(), } tags := map[string]string{ - "deployment_name": d.Metadata.GetName(), - "namespace": d.Metadata.GetNamespace(), + "deployment_name": d.Name, + "namespace": d.Namespace, } - for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range d.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } acc.AddFields(deploymentMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index 9407c84d91322..277377619fe84 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -1,15 +1,17 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/util/intstr" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestDeployment(t *testing.T) { @@ -18,24 +20,11 @@ func TestDeployment(t *testing.T) { selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) - outputMetric := &testutil.Metric{ - Fields: map[string]interface{}{ - "replicas_available": int32(1), - "replicas_unavailable": int32(4), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "namespace": "ns1", - "deployment_name": "deploy1", - "selector_select1": "s1", - "selector_select2": "s2", - }, - } tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -52,23 +41,23 @@ func TestDeployment(t *testing.T) { handler: 
&mockHandler{ responseMap: map[string]interface{}{ "/deployments/": &v1.DeploymentList{ - Items: []*v1.Deployment{ + Items: []v1.Deployment{ { - Status: &v1.DeploymentStatus{ - Replicas: toInt32Ptr(3), - AvailableReplicas: toInt32Ptr(1), - UnavailableReplicas: toInt32Ptr(4), - UpdatedReplicas: toInt32Ptr(2), - ObservedGeneration: toInt64Ptr(9121), + Status: v1.DeploymentStatus{ + Replicas: 3, + AvailableReplicas: 1, + UnavailableReplicas: 4, + UpdatedReplicas: 2, + ObservedGeneration: 9121, }, - Spec: &v1.DeploymentSpec{ - Strategy: &v1.DeploymentStrategy{ + Spec: v1.DeploymentSpec{ + Strategy: v1.DeploymentStrategy{ RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ - IntVal: toInt32Ptr(30), + IntVal: 30, }, MaxSurge: &intstr.IntOrString{ - IntVal: toInt32Ptr(20), + IntVal: 20, }, }, }, @@ -80,25 +69,37 @@ func TestDeployment(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("deploy1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "deploy1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - outputMetric, - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_deployment", + map[string]string{ + "namespace": "ns1", + "deployment_name": "deploy1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "replicas_available": int32(1), + "replicas_unavailable": int32(4), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -110,37 +111,23 @@ func TestDeployment(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(*deployment, acc) - if err != nil { - t.Errorf("Failed to gather deployment - %s", err.Error()) - } + ks.gatherDeployment(deployment, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -151,23 +138,23 @@ func TestDeploymentSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/deployments/": 
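In the upstream `intstr` package, `IntVal` is a plain `int32` rather than a `*int32`, and a constructor is available as well. A two-line sketch of the equivalence used in the test data above:

```go
import "k8s.io/apimachinery/pkg/util/intstr"

// Two ways to build the MaxUnavailable test value now that IntVal is a value field.
var maxUnavailable = intstr.IntOrString{IntVal: 30}
var viaConstructor = intstr.FromInt(30) // constructor form, same integer value
```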
&v1.DeploymentList{ - Items: []*v1.Deployment{ + Items: []v1.Deployment{ { - Status: &v1.DeploymentStatus{ - Replicas: toInt32Ptr(3), - AvailableReplicas: toInt32Ptr(1), - UnavailableReplicas: toInt32Ptr(4), - UpdatedReplicas: toInt32Ptr(2), - ObservedGeneration: toInt64Ptr(9121), + Status: v1.DeploymentStatus{ + Replicas: 3, + AvailableReplicas: 1, + UnavailableReplicas: 4, + UpdatedReplicas: 2, + ObservedGeneration: 9121, }, - Spec: &v1.DeploymentSpec{ - Strategy: &v1.DeploymentStrategy{ + Spec: v1.DeploymentSpec{ + Strategy: v1.DeploymentStrategy{ RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ - IntVal: toInt32Ptr(30), + IntVal: 30, }, MaxSurge: &intstr.IntOrString{ - IntVal: toInt32Ptr(20), + IntVal: 20, }, }, }, @@ -179,15 +166,15 @@ func TestDeploymentSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("deploy1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "deploy1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -295,13 +282,10 @@ func TestDeploymentSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(*deployment, acc) - if err != nil { - t.Errorf("Failed to gather deployment - %s", err.Error()) - } + ks.gatherDeployment(deployment, acc) } // Grab selector tags @@ -314,8 +298,7 @@ func TestDeploymentSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 7298789da8e08..1eb86eea13b76 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -3,11 +3,9 @@ package kube_inventory import ( "context" "strings" - "time" - - "github.com/ericchiang/k8s/apis/core/v1" "github.com/influxdata/telegraf" + corev1 "k8s.io/api/core/v1" ) func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { @@ -17,66 +15,66 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne return } for _, i := range list.Items { - if err = ki.gatherEndpoint(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherEndpoint(i, acc) } } -func (ki *KubernetesInventory) gatherEndpoint(e v1.Endpoints, acc telegraf.Accumulator) error { - if e.Metadata.CreationTimestamp.GetSeconds() == 0 && e.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) { + creationTs := e.GetCreationTimestamp() + if creationTs.IsZero() { + return } fields := map[string]interface{}{ - "created": time.Unix(e.Metadata.CreationTimestamp.GetSeconds(), int64(e.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": e.Metadata.GetGeneration(), + "created": 
e.GetCreationTimestamp().UnixNano(), + "generation": e.Generation, } tags := map[string]string{ - "endpoint_name": e.Metadata.GetName(), - "namespace": e.Metadata.GetNamespace(), + "endpoint_name": e.Name, + "namespace": e.Namespace, } - for _, endpoint := range e.GetSubsets() { - for _, readyAddr := range endpoint.GetAddresses() { + for _, endpoint := range e.Subsets { + for _, readyAddr := range endpoint.Addresses { fields["ready"] = true - tags["hostname"] = readyAddr.GetHostname() - tags["node_name"] = readyAddr.GetNodeName() + tags["hostname"] = readyAddr.Hostname + if readyAddr.NodeName != nil { + tags["node_name"] = *readyAddr.NodeName + } if readyAddr.TargetRef != nil { - tags[strings.ToLower(readyAddr.GetTargetRef().GetKind())] = readyAddr.GetTargetRef().GetName() + tags[strings.ToLower(readyAddr.TargetRef.Kind)] = readyAddr.TargetRef.Name } - for _, port := range endpoint.GetPorts() { - fields["port"] = port.GetPort() + for _, port := range endpoint.Ports { + fields["port"] = port.Port - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) acc.AddFields(endpointMeasurement, fields, tags) } } - for _, notReadyAddr := range endpoint.GetNotReadyAddresses() { + for _, notReadyAddr := range endpoint.NotReadyAddresses { fields["ready"] = false - tags["hostname"] = notReadyAddr.GetHostname() - tags["node_name"] = notReadyAddr.GetNodeName() + tags["hostname"] = notReadyAddr.Hostname + if notReadyAddr.NodeName != nil { + tags["node_name"] = *notReadyAddr.NodeName + } if notReadyAddr.TargetRef != nil { - tags[strings.ToLower(notReadyAddr.GetTargetRef().GetKind())] = notReadyAddr.GetTargetRef().GetName() + tags[strings.ToLower(notReadyAddr.TargetRef.Kind)] = notReadyAddr.TargetRef.Name } - for _, port := range endpoint.GetPorts() { - fields["port"] = port.GetPort() + for _, port := range endpoint.Ports { + fields["port"] = port.Port - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) acc.AddFields(endpointMeasurement, fields, tags) } } } - - return nil } diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index b88c388162bd2..936a64b72544b 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -4,9 +4,12 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestEndpoint(t *testing.T) { @@ -18,7 +21,7 @@ func TestEndpoint(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -35,60 +38,60 @@ func TestEndpoint(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/endpoints/": &v1.EndpointsList{ - Items: []*v1.Endpoints{ + Items: []v1.Endpoints{ { - Subsets: []*v1.EndpointSubset{ + Subsets: []v1.EndpointSubset{ { - Addresses: []*v1.EndpointAddress{ + Addresses: []v1.EndpointAddress{ { - Hostname: toStrPtr("storage-6"), + Hostname: "storage-6", NodeName: toStrPtr("b.storage.internal"), TargetRef: &v1.ObjectReference{ - Kind: toStrPtr("pod"), - Name: toStrPtr("storage-6"), + Kind: "pod", 
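In `corev1.EndpointAddress` the `NodeName` field remains a `*string` because it is genuinely optional, so the tag is set only when the pointer is non-nil; this is also what the new "endpoints missing node_name" test case below exercises. A focused sketch of the guard:

```go
import corev1 "k8s.io/api/core/v1"

// tagAddress fills the per-address tags; node_name is omitted entirely,
// not set to "", when the apiserver did not report a node.
func tagAddress(tags map[string]string, addr corev1.EndpointAddress) {
	tags["hostname"] = addr.Hostname
	if addr.NodeName != nil {
		tags["node_name"] = *addr.NodeName
	}
}
```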
+ Name: "storage-6", }, }, }, - Ports: []*v1.EndpointPort{ + Ports: []v1.EndpointPort{ { - Name: toStrPtr("server"), - Protocol: toStrPtr("TCP"), - Port: toInt32Ptr(8080), + Name: "server", + Protocol: "TCP", + Port: 8080, }, }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("storage"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "ready": true, - "port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "endpoint_name": "storage", - "namespace": "ns1", - "hostname": "storage-6", - "node_name": "b.storage.internal", - "port_name": "server", - "port_protocol": "TCP", - "pod": "storage-6", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", }, - }, + map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -97,61 +100,157 @@ func TestEndpoint(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/endpoints/": &v1.EndpointsList{ - Items: []*v1.Endpoints{ + Items: []v1.Endpoints{ { - Subsets: []*v1.EndpointSubset{ + Subsets: []v1.EndpointSubset{ { - NotReadyAddresses: []*v1.EndpointAddress{ + NotReadyAddresses: []v1.EndpointAddress{ { - Hostname: toStrPtr("storage-6"), + Hostname: "storage-6", NodeName: toStrPtr("b.storage.internal"), TargetRef: &v1.ObjectReference{ - Kind: toStrPtr("pod"), - Name: toStrPtr("storage-6"), + Kind: "pod", + Name: "storage-6", }, }, }, - Ports: []*v1.EndpointPort{ + Ports: []v1.EndpointPort{ { - Name: toStrPtr("server"), - Protocol: toStrPtr("TCP"), - Port: toInt32Ptr(8080), + Name: "server", + Protocol: "TCP", + Port: 8080, }, }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("storage"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "ready": false, - "port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "endpoint_name": "storage", - "namespace": "ns1", - "hostname": "storage-6", - "node_name": "b.storage.internal", - "port_name": "server", - "port_protocol": "TCP", - "pod": "storage-6", + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "endpoints missing node_name", + 
handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []v1.Endpoints{ + { + Subsets: []v1.EndpointSubset{ + { + NotReadyAddresses: []v1.EndpointAddress{ + { + Hostname: "storage-6", + TargetRef: &v1.ObjectReference{ + Kind: "pod", + Name: "storage-6", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "server", + Protocol: "TCP", + Port: 8080, + }, + }, + }, + { + Addresses: []v1.EndpointAddress{ + { + Hostname: "storage-12", + TargetRef: &v1.ObjectReference{ + Kind: "pod", + Name: "storage-12", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "server", + Protocol: "TCP", + Port: 8080, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-12", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-12", + }, + map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -162,33 +261,19 @@ func TestEndpoint(t *testing.T) { } acc := new(testutil.Accumulator) for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { - err := ks.gatherEndpoint(*endpoint, acc) - if err != nil { - t.Errorf("Failed to gather endpoint - %s", err.Error()) - } + ks.gatherEndpoint(endpoint, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index 6d5c8019927cf..f8a966bc15a46 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + netv1 "k8s.io/api/networking/v1" 
"github.com/influxdata/telegraf" ) @@ -16,45 +15,47 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - if err = ki.gatherIngress(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherIngress(i, acc) } } -func (ki *KubernetesInventory) gatherIngress(i v1beta1EXT.Ingress, acc telegraf.Accumulator) error { - if i.Metadata.CreationTimestamp.GetSeconds() == 0 && i.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { + creationTs := i.GetCreationTimestamp() + if creationTs.IsZero() { + return } fields := map[string]interface{}{ - "created": time.Unix(i.Metadata.CreationTimestamp.GetSeconds(), int64(i.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": i.Metadata.GetGeneration(), + "created": i.GetCreationTimestamp().UnixNano(), + "generation": i.Generation, } tags := map[string]string{ - "ingress_name": i.Metadata.GetName(), - "namespace": i.Metadata.GetNamespace(), + "ingress_name": i.Name, + "namespace": i.Namespace, } - for _, ingress := range i.GetStatus().GetLoadBalancer().GetIngress() { - tags["hostname"] = ingress.GetHostname() - tags["ip"] = ingress.GetIp() + for _, ingress := range i.Status.LoadBalancer.Ingress { + tags["hostname"] = ingress.Hostname + tags["ip"] = ingress.IP - for _, rule := range i.GetSpec().GetRules() { - for _, path := range rule.GetIngressRuleValue().GetHttp().GetPaths() { - fields["backend_service_port"] = path.GetBackend().GetServicePort().GetIntVal() - fields["tls"] = i.GetSpec().GetTls() != nil + for _, rule := range i.Spec.Rules { + if rule.IngressRuleValue.HTTP == nil { + continue + } + for _, path := range rule.IngressRuleValue.HTTP.Paths { + if path.Backend.Service != nil { + tags["backend_service_name"] = path.Backend.Service.Name + fields["backend_service_port"] = path.Backend.Service.Port.Number + } - tags["backend_service_name"] = path.GetBackend().GetServiceName() - tags["path"] = path.GetPath() - tags["host"] = rule.GetHost() + fields["tls"] = i.Spec.TLS != nil + + tags["path"] = path.Path + tags["host"] = rule.Host acc.AddFields(ingressMeasurement, fields, tags) } } } - - return nil } diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index 2d111801a96f3..77ceceaac22ba 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -4,10 +4,13 @@ import ( "testing" "time" - v1 "github.com/ericchiang/k8s/apis/core/v1" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestIngress(t *testing.T) { @@ -19,14 +22,14 @@ func TestIngress(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no ingress", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/ingress/": &v1beta1EXT.IngressList{}, + "/ingress/": netv1.IngressList{}, }, }, hasError: false, @@ -35,31 +38,35 @@ func TestIngress(t *testing.T) { name: "collect ingress", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/ingress/": &v1beta1EXT.IngressList{ - Items: []*v1beta1EXT.Ingress{ 
+ "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ { - Status: &v1beta1EXT.IngressStatus{ - LoadBalancer: &v1.LoadBalancerStatus{ - Ingress: []*v1.LoadBalancerIngress{ + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ { - Hostname: toStrPtr("chron-1"), - Ip: toStrPtr("1.0.0.127"), + Hostname: "chron-1", + IP: "1.0.0.127", }, }, }, }, - Spec: &v1beta1EXT.IngressSpec{ - Rules: []*v1beta1EXT.IngressRule{ + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ { - Host: toStrPtr("ui.internal"), - IngressRuleValue: &v1beta1EXT.IngressRuleValue{ - Http: &v1beta1EXT.HTTPIngressRuleValue{ - Paths: []*v1beta1EXT.HTTPIngressPath{ + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: &netv1.HTTPIngressRuleValue{ + Paths: []netv1.HTTPIngressPath{ { - Path: toStrPtr("/"), - Backend: &v1beta1EXT.IngressBackend{ - ServiceName: toStrPtr("chronografd"), - ServicePort: toIntStrPtrI(8080), + Path: "/", + Backend: netv1.IngressBackend{ + Service: &netv1.IngressServiceBackend{ + Name: "chronografd", + Port: netv1.ServiceBackendPort{ + Number: 8080, + }, + }, }, }, }, @@ -68,38 +75,146 @@ func TestIngress(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("ui-lb"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "tls": false, - "backend_service_port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_ingress", + map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "backend_service_name": "chronografd", + "host": "ui.internal", + "path": "/", + }, + map[string]interface{}{ + "tls": false, + "backend_service_port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no HTTPIngressRuleValue", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ + { + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: "chron-1", + IP: "1.0.0.127", + }, + }, + }, + }, + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ + { + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: nil, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, - Tags: map[string]string{ - "ingress_name": "ui-lb", - "namespace": "ns1", - "ip": "1.0.0.127", - "hostname": "chron-1", - "backend_service_name": "chronografd", - "host": "ui.internal", - "path": "/", + }, + }, + }, + hasError: false, + }, + { + name: "no IngressServiceBackend", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ + { + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: "chron-1", + IP: "1.0.0.127", + }, + }, + }, + }, + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ + { + Host: "ui.internal", + 
IngressRuleValue: netv1.IngressRuleValue{ + HTTP: &netv1.HTTPIngressRuleValue{ + Paths: []netv1.HTTPIngressPath{ + { + Path: "/", + Backend: netv1.IngressBackend{ + Service: nil, + }, + }, + }, + }, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_ingress", + map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "host": "ui.internal", + "path": "/", + }, + map[string]interface{}{ + "tls": false, + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -109,34 +224,20 @@ func TestIngress(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, ingress := range ((v.handler.responseMap["/ingress/"]).(*v1beta1EXT.IngressList)).Items { - err := ks.gatherIngress(*ingress, acc) - if err != nil { - t.Errorf("Failed to gather ingress - %s", err.Error()) - } + for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items { + ks.gatherIngress(ingress, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 0a2a882974e67..94cb5faf9048b 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,18 +3,17 @@ package kube_inventory import ( "context" "fmt" - "io/ioutil" - "log" + "os" "strconv" "strings" "sync" "time" - "github.com/kubernetes/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -25,18 +24,20 @@ const ( // KubernetesInventory represents the config object for the plugin. 
type KubernetesInventory struct { - URL string `toml:"url"` - BearerToken string `toml:"bearer_token"` - BearerTokenString string `toml:"bearer_token_string"` - Namespace string `toml:"namespace"` - ResponseTimeout internal.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h - ResourceExclude []string `toml:"resource_exclude"` - ResourceInclude []string `toml:"resource_include"` - MaxConfigMapAge internal.Duration `toml:"max_config_map_age"` + URL string `toml:"url"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` + Namespace string `toml:"namespace"` + ResponseTimeout config.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h + ResourceExclude []string `toml:"resource_exclude"` + ResourceInclude []string `toml:"resource_include"` + MaxConfigMapAge config.Duration `toml:"max_config_map_age"` SelectorInclude []string `toml:"selector_include"` SelectorExclude []string `toml:"selector_exclude"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig client *client @@ -101,7 +102,7 @@ func (ki *KubernetesInventory) Init() error { } if ki.BearerToken != "" { - token, err := ioutil.ReadFile(ki.BearerToken) + token, err := os.ReadFile(ki.BearerToken) if err != nil { return err } @@ -109,7 +110,7 @@ func (ki *KubernetesInventory) Init() error { } var err error - ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) + ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, time.Duration(ki.ResponseTimeout), ki.ClientConfig) if err != nil { return err @@ -166,18 +167,18 @@ func atoi(s string) int64 { if err != nil { return 0 } - return int64(i) + return i } -func convertQuantity(s string, m float64) int64 { +func (ki *KubernetesInventory) convertQuantity(s string, m float64) int64 { q, err := resource.ParseQuantity(s) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) + ki.Log.Debugf("failed to parse quantity: %s", err.Error()) return 0 } f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) if err != nil { - log.Printf("D! 
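`internal.Duration` (a struct wrapping a `time.Duration` field) gives way to `config.Duration`, which is a defined type over `time.Duration`, so call sites use a plain conversion instead of reaching for `.Duration`. A minimal sketch with a hypothetical config struct:

```go
import (
	"time"

	"github.com/influxdata/telegraf/config"
)

type pluginConfig struct {
	ResponseTimeout config.Duration `toml:"response_timeout"` // e.g. "5s"
}

func timeoutOf(c pluginConfig) time.Duration {
	// config.Duration is `type Duration time.Duration`, so a simple
	// conversion recovers the stdlib type for APIs that need it.
	return time.Duration(c.ResponseTimeout)
}
```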
[inputs.kube_inventory] failed to parse float: %s", err.Error()) + ki.Log.Debugf("failed to parse float: %s", err.Error()) return 0 } if m < 1 { @@ -187,11 +188,11 @@ func convertQuantity(s string, m float64) int64 { } func (ki *KubernetesInventory) createSelectorFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + selectorFilter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) if err != nil { return err } - ki.selectorFilter = filter + ki.selectorFilter = selectorFilter return nil } @@ -211,7 +212,7 @@ var ( func init() { inputs.Add("kube_inventory", func() telegraf.Input { return &KubernetesInventory{ - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), Namespace: "default", SelectorInclude: []string{}, SelectorExclude: []string{"*"}, diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index cccf6897f8aa3..b46b4e6209ffc 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -15,42 +15,39 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI return } for _, n := range list.Items { - if err = ki.gatherNode(*n, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherNode(n, acc) } } -func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulator) { fields := map[string]interface{}{} tags := map[string]string{ - "node_name": *n.Metadata.Name, + "node_name": n.Name, } for resourceName, val := range n.Status.Capacity { switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = atoi(val.GetString_()) + fields["capacity_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["capacity_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["capacity_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["capacity_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": - fields["capacity_pods"] = atoi(val.GetString_()) + fields["capacity_pods"] = atoi(val.String()) } } for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = atoi(val.GetString_()) + fields["allocatable_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["allocatable_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["allocatable_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["allocatable_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": - fields["allocatable_pods"] = atoi(val.GetString_()) + fields["allocatable_pods"] = atoi(val.String()) } } acc.AddFields(nodeMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index 7573dd2c06f6d..02f330a7d1a2f 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -4,11 +4,13 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/apis/resource" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 
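`convertQuantity` parses a Kubernetes quantity string and scales it, which is how the node gather now reports both whole cores (`m = 1`) and millicores (`m = 1000`) from the same `cpu` value. A standalone sketch of the arithmetic, without the plugin's logger; note that `resource.Quantity` also offers `MilliValue()` for the common case:

```go
import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

// convert parses a quantity such as "16", "1000m", or "125817904Ki"
// and scales the result by m (clamped to at least 1).
func convert(s string, m float64) int64 {
	q, err := resource.ParseQuantity(s)
	if err != nil {
		return 0
	}
	f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64)
	if err != nil {
		return 0
	}
	if m < 1 {
		m = 1
	}
	return int64(f * m)
}

// convert("16", 1)       == 16          whole cores
// convert("16", 1000)    == 16000       millicores
// convert("1000m", 1000) == 1000        a 1-core ("1000m") value in millicores
```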
"k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestNode(t *testing.T) { @@ -19,14 +21,14 @@ func TestNode(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no nodes", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/nodes/": &v1.NodeList{}, + "/nodes/": corev1.NodeList{}, }, }, hasError: false, @@ -35,86 +37,87 @@ func TestNode(t *testing.T) { name: "collect nodes", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/nodes/": &v1.NodeList{ - Items: []*v1.Node{ + "/nodes/": corev1.NodeList{ + Items: []corev1.Node{ { - Status: &v1.NodeStatus{ - NodeInfo: &v1.NodeSystemInfo{ - KernelVersion: toStrPtr("4.14.48-coreos-r2"), - OsImage: toStrPtr("Container Linux by CoreOS 1745.7.0 (Rhyolite)"), - ContainerRuntimeVersion: toStrPtr("docker://18.3.1"), - KubeletVersion: toStrPtr("v1.10.3"), - KubeProxyVersion: toStrPtr("v1.10.3"), + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + KernelVersion: "4.14.48-coreos-r2", + OSImage: "Container Linux by CoreOS 1745.7.0 (Rhyolite)", + ContainerRuntimeVersion: "docker://18.3.1", + KubeletVersion: "v1.10.3", + KubeProxyVersion: "v1.10.3", }, - Phase: toStrPtr("Running"), - Capacity: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("16")}, - "ephemeral_storage_bytes": {String_: toStrPtr("49536401408")}, - "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, - "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, - "memory": {String_: toStrPtr("125817904Ki")}, - "pods": {String_: toStrPtr("110")}, + Phase: "Running", + Capacity: corev1.ResourceList{ + "cpu": resource.MustParse("16"), + "ephemeral_storage_bytes": resource.MustParse("49536401408"), + "hugepages_1Gi_bytes": resource.MustParse("0"), + "hugepages_2Mi_bytes": resource.MustParse("0"), + "memory": resource.MustParse("125817904Ki"), + "pods": resource.MustParse("110"), }, - Allocatable: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("16")}, - "ephemeral_storage_bytes": {String_: toStrPtr("44582761194")}, - "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, - "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, - "memory": {String_: toStrPtr("125715504Ki")}, - "pods": {String_: toStrPtr("110")}, + Allocatable: corev1.ResourceList{ + "cpu": resource.MustParse("1000m"), + "ephemeral_storage_bytes": resource.MustParse("44582761194"), + "hugepages_1Gi_bytes": resource.MustParse("0"), + "hugepages_2Mi_bytes": resource.MustParse("0"), + "memory": resource.MustParse("125715504Ki"), + "pods": resource.MustParse("110"), }, - Conditions: []*v1.NodeCondition{ - {Type: toStrPtr("Ready"), Status: toStrPtr("true"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}}, - {Type: toStrPtr("OutOfDisk"), Status: toStrPtr("false"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}}, + Conditions: []corev1.NodeCondition{ + {Type: "Ready", Status: "true", LastTransitionTime: metav1.Time{Time: now}}, + {Type: "OutOfDisk", Status: "false", LastTransitionTime: metav1.Time{Time: created}}, }, }, - Spec: &v1.NodeSpec{ - ProviderID: toStrPtr("aws:///us-east-1c/i-0c00"), - Taints: []*v1.Taint{ + Spec: corev1.NodeSpec{ + ProviderID: "aws:///us-east-1c/i-0c00", + Taints: []corev1.Taint{ { - Key: toStrPtr("k1"), - Value: toStrPtr("v1"), - Effect: toStrPtr("NoExecute"), + Key: "k1", + Value: "v1", + Effect: "NoExecute", }, { - Key: 
toStrPtr("k2"), - Value: toStrPtr("v2"), - Effect: toStrPtr("NoSchedule"), + Key: "k2", + Value: "v2", + Effect: "NoSchedule", }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(int64(11232)), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("node1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11232, + Namespace: "ns1", + Name: "node1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Measurement: nodeMeasurement, - Fields: map[string]interface{}{ - "capacity_cpu_cores": int64(16), - "capacity_memory_bytes": int64(1.28837533696e+11), - "capacity_pods": int64(110), - "allocatable_cpu_cores": int64(16), - "allocatable_memory_bytes": int64(1.28732676096e+11), - "allocatable_pods": int64(110), - }, - Tags: map[string]string{ - "node_name": "node1", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + nodeMeasurement, + map[string]string{ + "node_name": "node1", }, - }, + map[string]interface{}{ + "capacity_cpu_cores": int64(16), + "capacity_millicpu_cores": int64(16000), + "capacity_memory_bytes": int64(1.28837533696e+11), + "capacity_pods": int64(110), + "allocatable_cpu_cores": int64(1), + "allocatable_millicpu_cores": int64(1000), + "allocatable_memory_bytes": int64(1.28732676096e+11), + "allocatable_pods": int64(110), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -125,48 +128,20 @@ func TestNode(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, node := range ((v.handler.responseMap["/nodes/"]).(*v1.NodeList)).Items { - err := ks.gatherNode(*node, acc) - if err != nil { - t.Errorf("Failed to gather node - %s", err.Error()) - } + for _, node := range ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items { + ks.gatherNode(node, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - measurement := v.output.Metrics[i].Measurement - var keyTag string - switch measurement { - case nodeMeasurement: - keyTag = "node" - } - var j int - for j = range acc.Metrics { - if acc.Metrics[j].Measurement == measurement && - acc.Metrics[j].Tags[keyTag] == v.output.Metrics[i].Tags[keyTag] { - break - } - } - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[j].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, measurement %s, j %d\n", v.name, k, m, acc.Metrics[j].Tags[k], measurement, j) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[j].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), measurement %s, j %d\n", v.name, k, m, m, acc.Metrics[j].Fields[k], acc.Metrics[i].Fields[k], measurement, j) - } - } - } - } + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/persistentvolume.go 
b/plugins/inputs/kube_inventory/persistentvolume.go index 05600522b7ea8..4199dfed9e4c3 100644 --- a/plugins/inputs/kube_inventory/persistentvolume.go +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,16 +16,13 @@ func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki return } for _, pv := range list.Items { - if err = ki.gatherPersistentVolume(*pv, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPersistentVolume(pv, acc) } } -func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherPersistentVolume(pv corev1.PersistentVolume, acc telegraf.Accumulator) { phaseType := 5 - switch strings.ToLower(pv.Status.GetPhase()) { + switch strings.ToLower(string(pv.Status.Phase)) { case "bound": phaseType = 0 case "failed": @@ -41,12 +38,10 @@ func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, ac "phase_type": phaseType, } tags := map[string]string{ - "pv_name": pv.Metadata.GetName(), - "phase": pv.Status.GetPhase(), - "storageclass": pv.Spec.GetStorageClassName(), + "pv_name": pv.Name, + "phase": string(pv.Status.Phase), + "storageclass": pv.Spec.StorageClassName, } acc.AddFields(persistentVolumeMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go index a5d20d047331a..2f62081afb7f6 100644 --- a/plugins/inputs/kube_inventory/persistentvolume_test.go +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -4,10 +4,12 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPersistentVolume(t *testing.T) { @@ -18,14 +20,14 @@ func TestPersistentVolume(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no pv", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumes/": &v1.PersistentVolumeList{}, + "/persistentvolumes/": &corev1.PersistentVolumeList{}, }, }, hasError: false, @@ -34,41 +36,41 @@ func TestPersistentVolume(t *testing.T) { name: "collect pvs", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumes/": &v1.PersistentVolumeList{ - Items: []*v1.PersistentVolume{ + "/persistentvolumes/": &corev1.PersistentVolumeList{ + Items: []corev1.PersistentVolume{ { - Status: &v1.PersistentVolumeStatus{ - Phase: toStrPtr("pending"), + Status: corev1.PersistentVolumeStatus{ + Phase: "pending", }, - Spec: &v1.PersistentVolumeSpec{ - StorageClassName: toStrPtr("ebs-1"), + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "ebs-1", }, - Metadata: &metav1.ObjectMeta{ - Name: toStrPtr("pv1"), + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "phase_type": 2, - }, - 
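For persistent volumes, the `phase` tag carries the human-readable string while `phase_type` is a stable numeric code; the visible hunks and the accompanying test pin `bound` to 0, `failed` to 1, `pending` to 2, with 5 as the fallback. A sketch of that mapping as a standalone helper; the codes for the remaining phases are elided in this hunk and therefore omitted here:

```go
import (
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// phaseCode mirrors the mapping exercised in this diff (bound=0,
// failed=1, pending=2, 5 as catch-all). The full plugin also assigns
// codes to the other phases, which this sketch leaves out.
func phaseCode(phase corev1.PersistentVolumePhase) int {
	switch strings.ToLower(string(phase)) {
	case "bound":
		return 0
	case "failed":
		return 1
	case "pending":
		return 2
	default:
		return 5
	}
}
```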
Tags: map[string]string{ - "pv_name": "pv1", - "storageclass": "ebs-1", - "phase": "pending", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolume", + map[string]string{ + "pv_name": "pv1", + "storageclass": "ebs-1", + "phase": "pending", }, - }, + map[string]interface{}{ + "phase_type": 2, + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -79,34 +81,20 @@ func TestPersistentVolume(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*v1.PersistentVolumeList)).Items { - err := ks.gatherPersistentVolume(*pv, acc) - if err != nil { - t.Errorf("Failed to gather pv - %s", err.Error()) - } + for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items { + ks.gatherPersistentVolume(pv, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go index ac8c9f85a931c..2b06cce6b9fbb 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,16 +16,13 @@ func collectPersistentVolumeClaims(ctx context.Context, acc telegraf.Accumulator return } for _, pvc := range list.Items { - if err = ki.gatherPersistentVolumeClaim(*pvc, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPersistentVolumeClaim(pvc, acc) } } -func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolumeClaim, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc corev1.PersistentVolumeClaim, acc telegraf.Accumulator) { phaseType := 3 - switch strings.ToLower(pvc.Status.GetPhase()) { + switch strings.ToLower(string(pvc.Status.Phase)) { case "bound": phaseType = 0 case "lost": @@ -37,18 +34,20 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolu "phase_type": phaseType, } tags := map[string]string{ - "pvc_name": pvc.Metadata.GetName(), - "namespace": pvc.Metadata.GetNamespace(), - "phase": pvc.Status.GetPhase(), - "storageclass": pvc.Spec.GetStorageClassName(), + "pvc_name": pvc.Name, + "namespace": pvc.Namespace, + "phase": string(pvc.Status.Phase), + } 
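The nil checks that follow are the recurring pattern of this migration: the official k8s.io types model optional fields as pointers (a claim's StorageClassName is a *string and its Selector a *metav1.LabelSelector), while required fields are plain values. A minimal sketch of the guard; storageClass is an illustrative helper name, not plugin code:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// storageClass returns the claim's storage class and whether one was set,
// dereferencing the optional *string field only after a nil check.
func storageClass(pvc corev1.PersistentVolumeClaim) (string, bool) {
	if pvc.Spec.StorageClassName == nil {
		return "", false
	}
	return *pvc.Spec.StorageClassName, true
}

func main() {
	var pvc corev1.PersistentVolumeClaim // zero value: no storage class set
	name, ok := storageClass(pvc)
	fmt.Println(name, ok) // prints an empty name and false
}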
+ if pvc.Spec.StorageClassName != nil { + tags["storageclass"] = *pvc.Spec.StorageClassName } - for key, val := range pvc.GetSpec().GetSelector().GetMatchLabels() { - if ki.selectorFilter.Match(key) { - tags["selector_"+key] = val + if pvc.Spec.Selector != nil { + for key, val := range pvc.Spec.Selector.MatchLabels { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } } } acc.AddFields(persistentVolumeClaimMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index 5155a5d3ba698..00da84f9f757a 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -1,15 +1,16 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPersistentVolumeClaim(t *testing.T) { @@ -22,14 +23,14 @@ func TestPersistentVolumeClaim(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no pv claims", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{}, + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{}, }, }, hasError: false, @@ -38,14 +39,14 @@ func TestPersistentVolumeClaim(t *testing.T) { name: "collect pv claims", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ - Items: []*v1.PersistentVolumeClaim{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ { - Status: &v1.PersistentVolumeClaimStatus{ - Phase: toStrPtr("bound"), + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", }, - Spec: &v1.PersistentVolumeClaimSpec{ - VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", StorageClassName: toStrPtr("ebs-1"), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -54,37 +55,135 @@ func TestPersistentVolumeClaim(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pc1"), + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "phase_type": 0, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no label selectors", + hasError: false, + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: 
[]corev1.PersistentVolumeClaim{ + { + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", + StorageClassName: toStrPtr("ebs-1"), + Selector: nil, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, - Tags: map[string]string{ - "pvc_name": "pc1", - "namespace": "ns1", - "storageclass": "ebs-1", - "phase": "bound", - "selector_select1": "s1", - "selector_select2": "s2", + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "no storage class name", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ + { + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", + StorageClassName: nil, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -95,37 +194,23 @@ func TestPersistentVolumeClaim(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { - err := ks.gatherPersistentVolumeClaim(*pvc, acc) - if err != nil { - t.Errorf("Failed to gather pvc - %s", err.Error()) - } + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { + ks.gatherPersistentVolumeClaim(pvc, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + 
continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -135,14 +220,14 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) responseMap := map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ - Items: []*v1.PersistentVolumeClaim{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ { - Status: &v1.PersistentVolumeClaimStatus{ - Phase: toStrPtr("bound"), + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", }, - Spec: &v1.PersistentVolumeClaimSpec{ - VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", StorageClassName: toStrPtr("ebs-1"), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -151,14 +236,14 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pc1"), + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -266,13 +351,10 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { - err := ks.gatherPersistentVolumeClaim(*pvc, acc) - if err != nil { - t.Errorf("Failed to gather pvc - %s", err.Error()) - } + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { + ks.gatherPersistentVolumeClaim(pvc, acc) } // Grab selector tags @@ -285,8 +367,7 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index 2f17f690d08c5..ed95dd63d970d 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - v1 "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -15,30 +15,35 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn return } for _, p := range list.Items { - if err = ki.gatherPod(*p, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPod(p, acc) } } -func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) error { - if p.Metadata.CreationTimestamp.GetSeconds() == 0 && p.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) { + creationTs := p.GetCreationTimestamp() + if 
creationTs.IsZero() { + return } - for i, cs := range p.Status.ContainerStatuses { - c := p.Spec.Containers[i] - gatherPodContainer(*p.Spec.NodeName, ki, p, *cs, *c, acc) + containerList := map[string]*corev1.ContainerStatus{} + for i := range p.Status.ContainerStatuses { + containerList[p.Status.ContainerStatuses[i].Name] = &p.Status.ContainerStatuses[i] } - return nil + for _, c := range p.Spec.Containers { + cs, ok := containerList[c.Name] + if !ok { + cs = &corev1.ContainerStatus{} + } + ki.gatherPodContainer(p, *cs, c, acc) + } } -func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) { +func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { stateCode := 3 stateReason := "" state := "unknown" + readiness := "unready" switch { case cs.State.Running != nil: @@ -47,37 +52,46 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v case cs.State.Terminated != nil: stateCode = 1 state = "terminated" - stateReason = cs.State.Terminated.GetReason() + stateReason = cs.State.Terminated.Reason case cs.State.Waiting != nil: stateCode = 2 state = "waiting" - stateReason = cs.State.Waiting.GetReason() + stateReason = cs.State.Waiting.Reason } - readiness := "unready" - if cs.GetReady() { + if cs.Ready { readiness = "ready" } fields := map[string]interface{}{ - "restarts_total": cs.GetRestartCount(), - "state_code": stateCode, - "terminated_reason": cs.State.Terminated.GetReason(), + "restarts_total": cs.RestartCount, + "state_code": stateCode, + } + + // deprecated in 1.15: use `state_reason` instead + if state == "terminated" { + fields["terminated_reason"] = stateReason } if stateReason != "" { fields["state_reason"] = stateReason } + phaseReason := p.Status.Reason + if phaseReason != "" { + fields["phase_reason"] = phaseReason + } + tags := map[string]string{ - "container_name": *c.Name, - "namespace": *p.Metadata.Namespace, - "node_name": *p.Spec.NodeName, - "pod_name": *p.Metadata.Name, + "container_name": c.Name, + "namespace": p.Namespace, + "node_name": p.Spec.NodeName, + "pod_name": p.Name, + "phase": string(p.Status.Phase), "state": state, "readiness": readiness, } - for key, val := range p.GetSpec().GetNodeSelector() { + for key, val := range p.Spec.NodeSelector { if ki.selectorFilter.Match(key) { tags["node_selector_"+key] = val } @@ -89,17 +103,17 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v for resourceName, val := range req { switch resourceName { case "cpu": - fields["resource_requests_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + fields["resource_requests_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_requests_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["resource_requests_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } for resourceName, val := range lim { switch resourceName { case "cpu": - fields["resource_limits_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + fields["resource_limits_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_limits_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["resource_limits_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go index d9b3221655027..962805a67e3a3 
100644 --- a/plugins/inputs/kube_inventory/pod_test.go +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -1,15 +1,17 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - v1 "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/apis/resource" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPod(t *testing.T) { @@ -18,21 +20,21 @@ func TestPod(t *testing.T) { selectExclude := []string{} now := time.Now() started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) - created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 0, 0, now.Location()) cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no pods", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/pods/": &v1.PodList{}, + "/pods/": &corev1.PodList{}, }, }, hasError: false, @@ -41,79 +43,79 @@ func TestPod(t *testing.T) { name: "collect pods", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/pods/": &v1.PodList{ - Items: []*v1.Pod{ + "/pods/": &corev1.PodList{ + Items: []corev1.Pod{ { - Spec: &v1.PodSpec{ - NodeName: toStrPtr("node1"), - Containers: []*v1.Container{ + Spec: corev1.PodSpec{ + NodeName: "node1", + Containers: []corev1.Container{ { - Name: toStrPtr("running"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "running", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, }, }, { - Name: toStrPtr("completed"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "completed", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, }, }, { - Name: toStrPtr("waiting"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "waiting", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: 
corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, }, }, }, - Volumes: []*v1.Volume{ + Volumes: []corev1.Volume{ { - Name: toStrPtr("vol1"), - VolumeSource: &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: toStrPtr("pc1"), - ReadOnly: toBoolPtr(true), + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pc1", + ReadOnly: true, }, }, }, { - Name: toStrPtr("vol2"), + Name: "vol2", }, }, NodeSelector: map[string]string{ @@ -121,153 +123,162 @@ func TestPod(t *testing.T) { "select2": "s2", }, }, - Status: &v1.PodStatus{ - Phase: toStrPtr("Running"), - HostIP: toStrPtr("180.12.10.18"), - PodIP: toStrPtr("10.244.2.15"), - StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, - Conditions: []*v1.PodCondition{ + Status: corev1.PodStatus{ + Phase: "Running", + HostIP: "180.12.10.18", + PodIP: "10.244.2.15", + StartTime: &metav1.Time{Time: started}, + Conditions: []corev1.PodCondition{ { - Type: toStrPtr("Initialized"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Initialized", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, { - Type: toStrPtr("Ready"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Type: "Ready", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond2}, }, { - Type: toStrPtr("Scheduled"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Scheduled", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, }, - ContainerStatuses: []*v1.ContainerStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { - Name: toStrPtr("running"), - State: &v1.ContainerState{ - Running: &v1.ContainerStateRunning{ - StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Name: "running", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: started}, }, }, - Ready: toBoolPtr(true), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: true, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, { - Name: toStrPtr("completed"), - State: &v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, - ExitCode: toInt32Ptr(0), - Reason: toStrPtr("Completed"), + Name: "completed", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + StartedAt: metav1.Time{Time: now}, + ExitCode: 0, + Reason: "Completed", }, }, - Ready: toBoolPtr(false), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: false, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, { - Name: toStrPtr("waiting"), - State: &v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{ - Reason: toStrPtr("PodUninitialized"), + Name: "waiting", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: 
"PodUninitialized", }, }, - Ready: toBoolPtr(false), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: false, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, }, }, - Metadata: &metav1.ObjectMeta{ - OwnerReferences: []*metav1.OwnerReference{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ { - ApiVersion: toStrPtr("apps/v1"), - Kind: toStrPtr("DaemonSet"), - Name: toStrPtr("forwarder"), + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "forwarder", Controller: toBoolPtr(true), }, }, - Generation: toInt64Ptr(11232), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pod1"), + Generation: 11232, + Namespace: "ns1", + Name: "pod1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: created}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "restarts_total": int32(3), - "state_code": 0, - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "running", - "node_name": "node1", - "pod_name": "pod1", - "state": "running", - "readiness": "ready", - "node_selector_select1": "s1", - "node_selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "running", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Running", + "state": "running", + "readiness": "ready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", }, - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "restarts_total": int32(3), - "state_code": 1, - "state_reason": "Completed", - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "completed", - "node_name": "node1", - "pod_name": "pod1", - "state": "terminated", - "readiness": "unready", - }, + map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 0, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), }, - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "restarts_total": int32(3), - "state_code": 2, - "state_reason": "PodUninitialized", - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "waiting", - "node_name": "node1", - "pod_name": "pod1", - "state": "waiting", - "readiness": "unready", - }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "completed", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Running", + "state": "terminated", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", }, - }, + map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 1, + "state_reason": "Completed", + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + "terminated_reason": 
"Completed", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "waiting", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Running", + "state": "waiting", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 2, + "state_reason": "PodUninitialized", + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -278,37 +289,23 @@ func TestPod(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { - err := ks.gatherPod(*pod, acc) - if err != nil { - t.Errorf("Failed to gather pod - %s", err.Error()) - } + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -321,43 +318,43 @@ func TestPodSelectorFilter(t *testing.T) { cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) responseMap := map[string]interface{}{ - "/pods/": &v1.PodList{ - Items: []*v1.Pod{ + "/pods/": &corev1.PodList{ + Items: []corev1.Pod{ { - Spec: &v1.PodSpec{ - NodeName: toStrPtr("node1"), - Containers: []*v1.Container{ + Spec: corev1.PodSpec{ + NodeName: "node1", + Containers: []corev1.Container{ { - Name: toStrPtr("forwarder"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "forwarder", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), }, }, }, }, - Volumes: []*v1.Volume{ + Volumes: []corev1.Volume{ { - Name: toStrPtr("vol1"), - VolumeSource: &v1.VolumeSource{ - PersistentVolumeClaim: 
&v1.PersistentVolumeClaimVolumeSource{ - ClaimName: toStrPtr("pc1"), - ReadOnly: toBoolPtr(true), + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pc1", + ReadOnly: true, }, }, }, { - Name: toStrPtr("vol2"), + Name: "vol2", }, }, NodeSelector: map[string]string{ @@ -365,61 +362,61 @@ func TestPodSelectorFilter(t *testing.T) { "select2": "s2", }, }, - Status: &v1.PodStatus{ - Phase: toStrPtr("Running"), - HostIP: toStrPtr("180.12.10.18"), - PodIP: toStrPtr("10.244.2.15"), - StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, - Conditions: []*v1.PodCondition{ + Status: corev1.PodStatus{ + Phase: "Running", + HostIP: "180.12.10.18", + PodIP: "10.244.2.15", + StartTime: &metav1.Time{Time: started}, + Conditions: []corev1.PodCondition{ { - Type: toStrPtr("Initialized"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Initialized", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, { - Type: toStrPtr("Ready"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Type: "Ready", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond2}, }, { - Type: toStrPtr("Scheduled"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Scheduled", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, }, - ContainerStatuses: []*v1.ContainerStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { - Name: toStrPtr("forwarder"), - State: &v1.ContainerState{ - Running: &v1.ContainerStateRunning{ - StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Name: "forwarder", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: now}, }, }, - Ready: toBoolPtr(true), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: true, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, }, }, - Metadata: &metav1.ObjectMeta{ - OwnerReferences: []*metav1.OwnerReference{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ { - ApiVersion: toStrPtr("apps/v1"), - Kind: toStrPtr("DaemonSet"), - Name: toStrPtr("forwarder"), + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "forwarder", Controller: toBoolPtr(true), }, }, - Generation: toInt64Ptr(11232), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pod1"), + Generation: 11232, + Namespace: "ns1", + Name: "pod1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: created}, }, }, }, @@ -527,13 +524,10 @@ func TestPodSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { - err := ks.gatherPod(*pod, acc) - if err != nil { - t.Errorf("Failed to gather pod - %s", err.Error()) - } + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) } // Grab selector tags @@ -546,8 +540,212 @@ func TestPodSelectorFilter(t *testing.T) { } } - if 
!reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } +} + +func TestPodPendingContainers(t *testing.T) { + cli := &client{} + selectInclude := []string{} + selectExclude := []string{} + now := time.Now() + started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) + cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output []telegraf.Metric + hasError bool + }{ + { + name: "collect pods", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/pods/": &corev1.PodList{ + Items: []corev1.Pod{ + { + Spec: corev1.PodSpec{ + NodeName: "node1", + Containers: []corev1.Container{ + { + Name: "waiting", + Image: "image1", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + Protocol: "TCP", + }, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + }, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + }, + }, + }, + { + Name: "terminated", + Image: "image1", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + Protocol: "TCP", + }, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + }, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pc1", + ReadOnly: true, + }, + }, + }, + { + Name: "vol2", + }, + }, + NodeSelector: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + Status: corev1.PodStatus{ + Phase: "Pending", + Reason: "NetworkNotReady", + HostIP: "180.12.10.18", + PodIP: "10.244.2.15", + StartTime: &metav1.Time{Time: started}, + Conditions: []corev1.PodCondition{ + { + Type: "Initialized", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, + }, + { + Type: "Ready", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond2}, + }, + { + Type: "Scheduled", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{}, + }, + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "forwarder", + Controller: toBoolPtr(true), + }, + }, + Generation: 11232, + Namespace: "ns1", + Name: "pod1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: created}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "waiting", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + 
"resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "terminated", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + require.NoError(t, ks.createSelectorFilters()) + acc := new(testutil.Accumulator) + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) + } + + err := acc.FirstError() + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue + } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go index 0c749ea8ac3fc..d589188605c85 100644 --- a/plugins/inputs/kube_inventory/service.go +++ b/plugins/inputs/kube_inventory/service.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,53 +15,51 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet return } for _, i := range list.Items { - if err = ki.gatherService(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherService(i, acc) } } -func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumulator) error { - if s.Metadata.CreationTimestamp.GetSeconds() == 0 && s.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherService(s corev1.Service, acc telegraf.Accumulator) { + creationTs := s.GetCreationTimestamp() + if creationTs.IsZero() { + return } fields := map[string]interface{}{ - "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": s.Metadata.GetGeneration(), + "created": s.GetCreationTimestamp().UnixNano(), + "generation": s.Generation, } tags := map[string]string{ - "service_name": s.Metadata.GetName(), - "namespace": s.Metadata.GetNamespace(), + "service_name": s.Name, + "namespace": s.Namespace, } - for key, val := range s.GetSpec().GetSelector() { + for key, val := range s.Spec.Selector { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } var getPorts = func() { - for _, port := range s.GetSpec().GetPorts() { - fields["port"] = port.GetPort() - fields["target_port"] = port.GetTargetPort().GetIntVal() + for _, port := range s.Spec.Ports { + fields["port"] = port.Port + fields["target_port"] = port.TargetPort.IntVal - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) - if s.GetSpec().GetType() == "ExternalName" { - tags["external_name"] = s.GetSpec().GetExternalName() 
+ if s.Spec.Type == "ExternalName" { + tags["external_name"] = s.Spec.ExternalName } else { - tags["cluster_ip"] = s.GetSpec().GetClusterIP() + tags["cluster_ip"] = s.Spec.ClusterIP } acc.AddFields(serviceMeasurement, fields, tags) } } - if externIPs := s.GetSpec().GetExternalIPs(); externIPs != nil { + if externIPs := s.Spec.ExternalIPs; externIPs != nil { for _, ip := range externIPs { tags["ip"] = ip @@ -71,6 +68,4 @@ func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumula } else { getPorts() } - - return nil } diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go index 3b1089130fbf7..b89a45a45dd5c 100644 --- a/plugins/inputs/kube_inventory/service_test.go +++ b/plugins/inputs/kube_inventory/service_test.go @@ -1,16 +1,17 @@ package kube_inventory import ( - "reflect" - + "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/influxdata/telegraf/testutil" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" - "strings" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestService(t *testing.T) { @@ -21,7 +22,7 @@ func TestService(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool include []string exclude []string @@ -30,7 +31,7 @@ func TestService(t *testing.T) { name: "no service", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/service/": &v1.ServiceList{}, + "/service/": &corev1.ServiceList{}, }, }, hasError: false, @@ -39,30 +40,32 @@ func TestService(t *testing.T) { name: "collect service", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/service/": &v1.ServiceList{ - Items: []*v1.Service{ + "/service/": &corev1.ServiceList{ + Items: []corev1.Service{ { - Spec: &v1.ServiceSpec{ - Ports: []*v1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { - Port: toInt32Ptr(8080), - TargetPort: toIntStrPtrI(1234), - Name: toStrPtr("diagnostic"), - Protocol: toStrPtr("TCP"), + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 1234, + }, + Name: "diagnostic", + Protocol: "TCP", }, }, ExternalIPs: []string{"1.0.0.127"}, - ClusterIP: toStrPtr("127.0.0.1"), + ClusterIP: "127.0.0.1", Selector: map[string]string{ "select1": "s1", "select2": "s2", }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("checker"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "checker", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -70,27 +73,27 @@ func TestService(t *testing.T) { }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "port": int32(8080), - "target_port": int32(1234), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "service_name": "checker", - "namespace": "ns1", - "port_name": "diagnostic", - "port_protocol": "TCP", - "cluster_ip": "127.0.0.1", - "ip": "1.0.0.127", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_service", + map[string]string{ + "service_name": "checker", + "namespace": "ns1", + "port_name": "diagnostic", 
+ "port_protocol": "TCP", + "cluster_ip": "127.0.0.1", + "ip": "1.0.0.127", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "port": int32(8080), + "target_port": int32(1234), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -102,37 +105,23 @@ func TestService(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { - err := ks.gatherService(*service, acc) - if err != nil { - t.Errorf("Failed to gather service - %s", err.Error()) - } + for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { + ks.gatherService(service, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -142,30 +131,32 @@ func TestServiceSelectorFilter(t *testing.T) { now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) responseMap := map[string]interface{}{ - "/service/": &v1.ServiceList{ - Items: []*v1.Service{ + "/service/": &corev1.ServiceList{ + Items: []corev1.Service{ { - Spec: &v1.ServiceSpec{ - Ports: []*v1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { - Port: toInt32Ptr(8080), - TargetPort: toIntStrPtrI(1234), - Name: toStrPtr("diagnostic"), - Protocol: toStrPtr("TCP"), + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 1234, + }, + Name: "diagnostic", + Protocol: "TCP", }, }, ExternalIPs: []string{"1.0.0.127"}, - ClusterIP: toStrPtr("127.0.0.1"), + ClusterIP: "127.0.0.1", Selector: map[string]string{ "select1": "s1", "select2": "s2", }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("checker"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "checker", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -273,13 +264,10 @@ func TestServiceSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { - err := 
ks.gatherService(*service, acc) - if err != nil { - t.Errorf("Failed to gather service - %s", err.Error()) - } + for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { + ks.gatherService(service, acc) } // Grab selector tags @@ -292,8 +280,7 @@ func TestServiceSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index fe25f19f08440..06335fc612a1e 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/apps/v1" + v1 "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -16,36 +15,35 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube return } for _, s := range list.Items { - if err = ki.gatherStatefulSet(*s, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherStatefulSet(s, acc) } } -func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) { status := s.Status fields := map[string]interface{}{ - "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": *s.Metadata.Generation, - "replicas": *status.Replicas, - "replicas_current": *status.CurrentReplicas, - "replicas_ready": *status.ReadyReplicas, - "replicas_updated": *status.UpdatedReplicas, - "spec_replicas": *s.Spec.Replicas, - "observed_generation": *s.Status.ObservedGeneration, + "created": s.GetCreationTimestamp().UnixNano(), + "generation": s.Generation, + "replicas": status.Replicas, + "replicas_current": status.CurrentReplicas, + "replicas_ready": status.ReadyReplicas, + "replicas_updated": status.UpdatedReplicas, + "observed_generation": s.Status.ObservedGeneration, + } + if s.Spec.Replicas != nil { + fields["spec_replicas"] = *s.Spec.Replicas } tags := map[string]string{ - "statefulset_name": *s.Metadata.Name, - "namespace": *s.Metadata.Namespace, + "statefulset_name": s.Name, + "namespace": s.Namespace, } - for key, val := range s.GetSpec().GetSelector().GetMatchLabels() { - if ki.selectorFilter.Match(key) { - tags["selector_"+key] = val + if s.Spec.Selector != nil { + for key, val := range s.Spec.Selector.MatchLabels { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } } } acc.AddFields(statefulSetMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index 689cbadbc4b8d..6f30acc8b7435 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -1,15 +1,16 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func 
TestStatefulSet(t *testing.T) { @@ -21,7 +22,7 @@ func TestStatefulSet(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -38,16 +39,16 @@ func TestStatefulSet(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/statefulsets/": &v1.StatefulSetList{ - Items: []*v1.StatefulSet{ + Items: []v1.StatefulSet{ { - Status: &v1.StatefulSetStatus{ - Replicas: toInt32Ptr(2), - CurrentReplicas: toInt32Ptr(4), - ReadyReplicas: toInt32Ptr(1), - UpdatedReplicas: toInt32Ptr(3), - ObservedGeneration: toInt64Ptr(119), + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, }, - Spec: &v1.StatefulSetSpec{ + Spec: v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -56,43 +57,147 @@ func TestStatefulSet(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(332), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("sts1"), - Labels: map[string]string{ - "lab1": "v1", - "lab2": "v2", - }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "generation": int64(332), - "observed_generation": int64(119), - "created": now.UnixNano(), - "spec_replicas": int32(3), - "replicas": int32(2), - "replicas_current": int32(4), - "replicas_ready": int32(1), - "replicas_updated": int32(3), + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no label selector", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []v1.StatefulSet{ + { + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, + }, + Spec: v1.StatefulSetSpec{ + Replicas: toInt32Ptr(3), + Selector: nil, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, - Tags: map[string]string{ - "namespace": "ns1", - "statefulset_name": "sts1", - "selector_select1": "s1", - "selector_select2": "s2", + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no desired number of replicas", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + 
"/statefulsets/": &v1.StatefulSetList{ + Items: []v1.StatefulSet{ + { + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, + }, + Spec: v1.StatefulSetSpec{ + Replicas: nil, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -103,37 +208,23 @@ func TestStatefulSet(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() - acc := new(testutil.Accumulator) + require.NoError(t, ks.createSelectorFilters()) + acc := &testutil.Accumulator{} for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(*ss, acc) - if err != nil { - t.Errorf("Failed to gather ss - %s", err.Error()) - } + ks.gatherStatefulSet(ss, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -144,16 +235,16 @@ func TestStatefulSetSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/statefulsets/": &v1.StatefulSetList{ - Items: []*v1.StatefulSet{ + Items: []v1.StatefulSet{ { - Status: &v1.StatefulSetStatus{ - Replicas: toInt32Ptr(2), - CurrentReplicas: toInt32Ptr(4), - ReadyReplicas: toInt32Ptr(1), - UpdatedReplicas: toInt32Ptr(3), - ObservedGeneration: toInt64Ptr(119), + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, }, - Spec: &v1.StatefulSetSpec{ + Spec: v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -162,15 +253,11 @@ func TestStatefulSetSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(332), - Namespace: 
toStrPtr("ns1"), - Name: toStrPtr("sts1"), - Labels: map[string]string{ - "lab1": "v1", - "lab2": "v2", - }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -278,13 +365,10 @@ func TestStatefulSetSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(*ss, acc) - if err != nil { - t.Errorf("Failed to gather ss - %s", err.Error()) - } + ks.gatherStatefulSet(ss, acc) } // Grab selector tags @@ -297,8 +381,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index a574bed06ffe4..8ef5ef7b1dfca 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -28,8 +28,6 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. 
@@ -154,8 +152,6 @@ kubernetes_system_container [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index a9bb6ef4850d8..8ca636d480cc2 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -3,15 +3,14 @@ package kubernetes import ( "encoding/json" "fmt" - "io/ioutil" "net/http" - "net/url" + "os" "strings" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -30,7 +29,7 @@ type Kubernetes struct { labelFilter filter.Filter // HTTP Timeout specified as a string - 3s, 1m, 1h - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig @@ -65,7 +64,6 @@ var sampleConfig = ` ` const ( - summaryEndpoint = `%s/stats/summary` defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" ) @@ -89,14 +87,13 @@ func (k *Kubernetes) Description() string { } func (k *Kubernetes) Init() error { - // If neither are provided, use the default service account. 
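The fallback logic that follows in `Init` is worth spelling out: an explicitly configured token string wins, otherwise the token file (defaulting to the in-cluster service-account path) is read with `os.ReadFile`, which is the `ioutil` replacement this hunk introduces. A sketch of that order of precedence, extracted into a standalone function; the `TrimSpace` step is my assumption about how the raw file contents are normalized, not something this hunk shows.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

const defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token"

// loadBearerToken reproduces the fallback order sketched above: when neither
// option is configured, fall back to the service-account token file; when a
// token file is set (or defaulted), its contents override the literal string.
func loadBearerToken(tokenFile, tokenString string) (string, error) {
	if tokenFile == "" && tokenString == "" {
		tokenFile = defaultServiceAccountPath
	}
	if tokenFile != "" {
		raw, err := os.ReadFile(tokenFile)
		if err != nil {
			return "", err
		}
		// Assumption: trailing newline in the token file is stripped.
		tokenString = strings.TrimSpace(string(raw))
	}
	return tokenString, nil
}

func main() {
	tok, err := loadBearerToken("", "abc123")
	fmt.Println(tok, err) // abc123 <nil>
}
```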
if k.BearerToken == "" && k.BearerTokenString == "" { k.BearerToken = defaultServiceAccountPath } if k.BearerToken != "" { - token, err := ioutil.ReadFile(k.BearerToken) + token, err := os.ReadFile(k.BearerToken) if err != nil { return err } @@ -118,18 +115,9 @@ func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { return nil } -func buildURL(endpoint string, base string) (*url.URL, error) { - u := fmt.Sprintf(endpoint, base) - addr, err := url.Parse(u) - if err != nil { - return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) - } - return addr, nil -} - func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { summaryMetrics := &SummaryMetrics{} - err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) + err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) if err != nil { return err } @@ -140,7 +128,7 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err } buildSystemContainerMetrics(summaryMetrics, acc) buildNodeMetrics(summaryMetrics, acc) - buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc) + buildPodMetrics(summaryMetrics, podInfos, k.labelFilter, acc) return nil } @@ -193,19 +181,19 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) } func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) { - var podApi Pods - err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi) + var podAPI Pods + err := k.LoadJSON(fmt.Sprintf("%s/pods", baseURL), &podAPI) if err != nil { return nil, err } var podInfos []Metadata - for _, podMetadata := range podApi.Items { + for _, podMetadata := range podAPI.Items { podInfos = append(podInfos, podMetadata.Metadata) } return podInfos, nil } -func (k *Kubernetes) LoadJson(url string, v interface{}) error { +func (k *Kubernetes) LoadJSON(url string, v interface{}) error { var req, err = http.NewRequest("GET", url, nil) if err != nil { return err @@ -216,13 +204,13 @@ func (k *Kubernetes) LoadJson(url string, v interface{}) error { return err } if k.RoundTripper == nil { - if k.ResponseTimeout.Duration < time.Second { - k.ResponseTimeout.Duration = time.Second * 5 + if k.ResponseTimeout < config.Duration(time.Second) { + k.ResponseTimeout = config.Duration(time.Second * 5) } k.RoundTripper = &http.Transport{ TLSHandshakeTimeout: 5 * time.Second, TLSClientConfig: tlsCfg, - ResponseHeaderTimeout: k.ResponseTimeout.Duration, + ResponseHeaderTimeout: time.Duration(k.ResponseTimeout), } } req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) @@ -244,8 +232,19 @@ func (k *Kubernetes) LoadJson(url string, v interface{}) error { return nil } -func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { +func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { for _, pod := range summaryMetrics.Pods { + podLabels := make(map[string]string) + for _, info := range podInfo { + if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { + for k, v := range info.Labels { + if labelFilter.Match(k) { + podLabels[k] = v + } + } + } + } + for _, container := range pod.Containers { tags := map[string]string{ "node_name": summaryMetrics.Node.NodeName, @@ -253,16 +252,9 @@ func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []M "container_name": container.Name, "pod_name": pod.PodRef.Name, } - for 
_, info := range podInfo { - if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { - for k, v := range info.Labels { - if labelFilter.Match(k) { - tags[k] = v - } - } - } + for k, v := range podLabels { + tags[k] = v } - fields := make(map[string]interface{}) fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds @@ -287,6 +279,9 @@ func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []M "namespace": pod.PodRef.Namespace, "volume_name": volume.Name, } + for k, v := range podLabels { + tags[k] = v + } fields := make(map[string]interface{}) fields["available_bytes"] = volume.AvailableBytes fields["capacity_bytes"] = volume.CapacityBytes @@ -299,6 +294,9 @@ func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []M "pod_name": pod.PodRef.Name, "namespace": pod.PodRef.Namespace, } + for k, v := range podLabels { + tags[k] = v + } fields := make(map[string]interface{}) fields["rx_bytes"] = pod.Network.RXBytes fields["rx_errors"] = pod.Network.RXErrors diff --git a/plugins/inputs/kubernetes/kubernetes_pods.go b/plugins/inputs/kubernetes/kubernetes_pods.go index 672608e54fe25..29d5e77895266 100644 --- a/plugins/inputs/kubernetes/kubernetes_pods.go +++ b/plugins/inputs/kubernetes/kubernetes_pods.go @@ -2,7 +2,7 @@ package kubernetes type Pods struct { Kind string `json:"kind"` - ApiVersion string `json:"apiVersion"` + APIVersion string `json:"apiVersion"` Items []Item `json:"items"` } diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index faf40be3e1000..864905448780d 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -15,13 +15,14 @@ func TestKubernetesStats(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.RequestURI == "/stats/summary" { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responseStatsSummery) + _, err := fmt.Fprintln(w, responseStatsSummery) + require.NoError(t, err) } if r.RequestURI == "/pods" { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responsePods) + _, err := fmt.Fprintln(w, responsePods) + require.NoError(t, err) } - })) defer ts.Close() @@ -140,6 +141,8 @@ func TestKubernetesStats(t *testing.T) { "volume_name": "volume1", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_volume", fields, tags) @@ -153,9 +156,10 @@ func TestKubernetesStats(t *testing.T) { "node_name": "node1", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags) - } var responsePods = ` diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 7553c33c777b2..a77e99df61f6e 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -8,6 +8,7 @@ import ( "github.com/aristanetworks/goarista/lanz" pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -43,23 +44,22 @@ func (l *Lanz) Description() string { return "Read metrics off Arista LANZ, via socket" } -func (l *Lanz) Gather(acc telegraf.Accumulator) error { +func (l *Lanz) Gather(_ telegraf.Accumulator) error { return nil } func (l *Lanz) Start(acc telegraf.Accumulator) error { - if len(l.Servers) == 0 { l.Servers = 
append(l.Servers, "tcp://127.0.0.1:50001") } for _, server := range l.Servers { - deviceUrl, err := url.Parse(server) + deviceURL, err := url.Parse(server) if err != nil { return err } client := lanz.New( - lanz.WithAddr(deviceUrl.Host), + lanz.WithAddr(deviceURL.Host), lanz.WithBackoff(1*time.Second), lanz.WithTimeout(10*time.Second), ) @@ -72,7 +72,7 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error { l.wg.Add(1) go func() { l.wg.Done() - receive(acc, in, deviceUrl) + receive(acc, in, deviceURL) }() } return nil @@ -85,19 +85,20 @@ func (l *Lanz) Stop() { l.wg.Wait() } -func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) { +func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) { + //nolint:gosimple // for-select used on purpose for { select { case msg, ok := <-in: if !ok { return } - msgToAccumulator(acc, msg, deviceUrl) + msgToAccumulator(acc, msg, deviceURL) } } } -func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) { +func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceURL *url.URL) { cr := msg.GetCongestionRecord() if cr != nil { vals := map[string]interface{}{ @@ -114,8 +115,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u "entry_type": strconv.FormatInt(int64(cr.GetEntryType()), 10), "traffic_class": strconv.FormatInt(int64(cr.GetTrafficClass()), 10), "fabric_peer_intf_name": cr.GetFabricPeerIntfName(), - "source": deviceUrl.Hostname(), - "port": deviceUrl.Port(), + "source": deviceURL.Hostname(), + "port": deviceURL.Port(), } acc.AddFields("lanz_congestion_record", vals, tags) } @@ -129,8 +130,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u } tags := map[string]string{ "entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10), - "source": deviceUrl.Hostname(), - "port": deviceUrl.Port(), + "source": deviceURL.Hostname(), + "port": deviceURL.Port(), } acc.AddFields("lanz_global_buffer_usage_record", vals, tags) } diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 5f9c7ab24cb40..f2a8b5815e36d 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -6,7 +6,8 @@ import ( "testing" pb "github.com/aristanetworks/goarista/lanz/proto" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf/testutil" ) @@ -51,23 +52,22 @@ var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{ } func TestLanzGeneratesMetrics(t *testing.T) { - var acc testutil.Accumulator l := NewLanz() l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001") l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001") - deviceUrl1, err := url.Parse(l.Servers[0]) + deviceURL1, err := url.Parse(l.Servers[0]) if err != nil { t.Fail() } - deviceUrl2, err := url.Parse(l.Servers[1]) + deviceURL2, err := url.Parse(l.Servers[1]) if err != nil { t.Fail() } - msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1) + msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceURL1) acc.Wait(1) vals1 := map[string]interface{}{ @@ -92,7 +92,7 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1) acc.ClearMetrics() - msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2) + msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceURL2) acc.Wait(1) vals2 := map[string]interface{}{ @@ -117,7 +117,7 @@ 
func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2) acc.ClearMetrics() - msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1) + msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceURL1) acc.Wait(1) gburVals1 := map[string]interface{}{ @@ -133,5 +133,4 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1) acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1) - } diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 7e5ae25d4743d..bcb992b6fb6f7 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -162,8 +162,7 @@ func (l *LeoFS) Description() string { func (l *LeoFS) Gather(acc telegraf.Accumulator) error { if len(l.Servers) == 0 { - l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) - return nil + return l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) } var wg sync.WaitGroup for _, endpoint := range l.Servers { @@ -206,7 +205,11 @@ func (l *LeoFS) gatherServer( if err != nil { return err } - cmd.Start() + if err := cmd.Start(); err != nil { + return err + } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer internal.WaitTimeout(cmd, time.Second*5) scanner := bufio.NewScanner(stdout) if !scanner.Scan() { diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index f456a998e73a6..1e33ddc4c3d38 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,14 +1,13 @@ package leofs import ( - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "io/ioutil" - "log" "os" "os/exec" + "runtime" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var fakeSNMP4Manager = ` @@ -124,31 +123,23 @@ func main() { } ` -func makeFakeSNMPSrc(code string) string { - path := os.TempDir() + "/test.go" - err := ioutil.WriteFile(path, []byte(code), 0600) - if err != nil { - log.Fatalln(err) - } - return path -} - -func buildFakeSNMPCmd(src string) { - err := exec.Command("go", "build", "-o", "snmpwalk", src).Run() - if err != nil { - log.Fatalln(err) +func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { + executable := "snmpwalk" + if runtime.GOOS == "windows" { + executable = "snmpwalk.exe" } -} -func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { // Build the fake snmpwalk for test - src := makeFakeSNMPSrc(code) + src := os.TempDir() + "/test.go" + require.NoError(t, os.WriteFile(src, []byte(code), 0600)) defer os.Remove(src) - buildFakeSNMPCmd(src) - defer os.Remove("./snmpwalk") + + require.NoError(t, exec.Command("go", "build", "-o", executable, src).Run()) + defer os.Remove("./" + executable) + envPathOrigin := os.Getenv("PATH") // Refer to the fake snmpwalk - os.Setenv("PATH", ".") + require.NoError(t, os.Setenv("PATH", ".")) defer os.Setenv("PATH", envPathOrigin) l := &LeoFS{ @@ -164,7 +155,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) floatMetrics := KeyMapping[serverType] for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("leofs", metric), metric) + require.True(t, acc.HasFloatField("leofs", metric), metric) } } diff --git 
a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index ed24963404fc2..19848b6db0e37 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -2,7 +2,7 @@ package linux_sysctl_fs import ( "bytes" - "io/ioutil" + "errors" "os" "strconv" @@ -20,16 +20,20 @@ type SysctlFS struct { var sysctlFSDescription = `Provides Linux sysctl fs metrics` var sysctlFSSampleConfig = `` -func (_ SysctlFS) Description() string { +func (sfs SysctlFS) Description() string { return sysctlFSDescription } -func (_ SysctlFS) SampleConfig() string { +func (sfs SysctlFS) SampleConfig() string { return sysctlFSSampleConfig } func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + file) + bs, err := os.ReadFile(sfs.path + "/" + file) if err != nil { + // Ignore non-existing entries + if errors.Is(err, os.ErrNotExist) { + return nil + } return err } @@ -53,8 +57,12 @@ func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fiel } func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + name) + bs, err := os.ReadFile(sfs.path + "/" + name) if err != nil { + // Ignore non-existing entries + if errors.Is(err, os.ErrNotExist) { + return nil + } return err } @@ -71,12 +79,23 @@ func (sfs *SysctlFS) Gather(acc telegraf.Accumulator) error { fields := map[string]interface{}{} for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} { - sfs.gatherOne(n, fields) + if err := sfs.gatherOne(n, fields); err != nil { + return err + } } - sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") - sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") - sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + err := sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") + if err != nil { + return err + } + err = sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") + if err != nil { + return err + } + err = sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + if err != nil { + return err + } acc.AddFields("linux_sysctl_fs", fields, nil) return nil @@ -91,7 +110,6 @@ func GetHostProc() string { } func init() { - inputs.Add("linux_sysctl_fs", func() telegraf.Input { return &SysctlFS{ path: path.Join(GetHostProc(), "/sys/fs"), diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 78011e288b962..8b76b266b1c9e 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,7 +1,6 @@ package linux_sysctl_fs import ( - "io/ioutil" "os" "testing" @@ -10,16 +9,16 @@ import ( ) func TestSysctlFSGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644)) - require.NoError(t, 
ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-max", []byte("103\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) sfs := &SysctlFS{ path: td, diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 0abdba2c972df..8cc513e98cb70 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,11 +1,11 @@ # Logparser Input Plugin +### Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. + The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. - The `tail` plugin now provides all the functionality of the `logparser` plugin. Most options can be translated directly to the `tail` plugin: - For options in the `[inputs.logparser.grok]` section, the equivalent option diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 4fbd2e90d921c..83f5abd210bdd 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package logparser @@ -143,7 +144,7 @@ func (l *LogParserPlugin) Init() error { } // Gather is the primary function to collect the metrics for the plugin -func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { +func (l *LogParserPlugin) Gather(_ telegraf.Accumulator) error { l.Lock() defer l.Unlock() @@ -271,7 +272,6 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { - if line.Err != nil { l.Log.Errorf("Error tailing file %s, Error: %s", tailer.Filename, line.Err) @@ -321,7 +321,6 @@ func (l *LogParserPlugin) parser() { } else { l.Log.Errorf("Error parsing log line: %s", err.Error()) } - } } diff --git a/plugins/inputs/logparser/logparser_solaris.go b/plugins/inputs/logparser/logparser_solaris.go index 28afe26772846..da482b97d27be 100644 --- a/plugins/inputs/logparser/logparser_solaris.go +++ b/plugins/inputs/logparser/logparser_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris // +build solaris package logparser diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 142f78d464963..a2f780afd21b9 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -1,60 +1,58 @@ package logparser import ( - "io/ioutil" "os" - "runtime" - "strings" + "path/filepath" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" +) + +var ( + testdataDir = getTestdataDir() ) func TestStartNoParsers(t *testing.T) { logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: 
[]string{"testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, } acc := testutil.Accumulator{} - assert.Error(t, logparser.Start(&acc)) + require.Error(t, logparser.Start(&acc)) } func TestGrokParseLogFilesNonExistPattern(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, GrokConfig: GrokConfig{ Patterns: []string{"%{FOOBAR}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } acc := testutil.Accumulator{} err := logparser.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestGrokParseLogFiles(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}", "%{TEST_LOG_C}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, FromBeginning: true, - Files: []string{thisdir + "testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, } acc := testutil.Accumulator{} @@ -68,7 +66,7 @@ func TestGrokParseLogFiles(t *testing.T) { "logparser_grok", map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_a.log", + "path": filepath.Join(testdataDir, "test_a.log"), }, map[string]interface{}{ "clientip": "192.168.1.1", @@ -81,7 +79,7 @@ func TestGrokParseLogFiles(t *testing.T) { testutil.MustMetric( "logparser_grok", map[string]string{ - "path": thisdir + "testdata/test_b.log", + "path": filepath.Join(testdataDir, "test_b.log"), }, map[string]interface{}{ "myfloat": 1.25, @@ -93,7 +91,7 @@ func TestGrokParseLogFiles(t *testing.T) { testutil.MustMetric( "logparser_grok", map[string]string{ - "path": thisdir + "testdata/test_c.log", + "path": filepath.Join(testdataDir, "test_c.log"), "response_code": "200", }, map[string]interface{}{ @@ -111,30 +109,33 @@ func TestGrokParseLogFiles(t *testing.T) { } func TestGrokParseLogFilesAppearLater(t *testing.T) { - emptydir, err := ioutil.TempDir("", "TestGrokParseLogFilesAppearLater") + emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) - assert.NoError(t, err) - - thisdir := getCurrentDir() + require.NoError(t, err) logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{emptydir + "/*.log"}, + Files: []string{filepath.Join(emptydir, "*.log")}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) - _ = os.Symlink(thisdir+"testdata/test_a.log", emptydir+"/test_a.log") - assert.NoError(t, acc.GatherError(logparser.Gather)) + input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) + require.NoError(t, err) + + require.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) logparser.Stop() @@ 
-148,29 +149,27 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": emptydir + "/test_a.log", + "path": filepath.Join(emptydir, "test_a.log"), }) } // Test that test_a.log line gets parsed even though we don't have the correct // pattern available for test_b.log func TestGrokParseLogFilesOneBad(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "testdata/test_a.log"}, + Files: []string{filepath.Join(testdataDir, "test_a.log")}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -184,27 +183,25 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_a.log", + "path": filepath.Join(testdataDir, "test_a.log"), }) } func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_C}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, FromBeginning: true, - Files: []string{thisdir + "testdata/test_c.log"}, + Files: []string{filepath.Join(testdataDir, "test_c.log")}, } acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -218,11 +215,16 @@ func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_c.log", + "path": filepath.Join(testdataDir, "test_c.log"), }) } -func getCurrentDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "logparser_test.go", "", 1) +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") } diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 9571de5fd8873..95ec3e6feae66 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -42,6 +42,8 @@ Logstash 5 and later is supported. ### Metrics +Additional plugin stats may be collected (because logstash doesn't consistently expose all stats) + - logstash_jvm - tags: - node_id @@ -125,6 +127,10 @@ Logstash 5 and later is supported. 
- duration_in_millis - in - out + - bulk_requests_failures (for Logstash 7+) + - bulk_requests_with_errors (for Logstash 7+) + - documents_successes (for logstash 7+) + - documents_retryable_failures (for logstash 7+) - logstash_queue - tags: diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index e360ba032ff35..9f5a198587e4d 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -4,14 +4,13 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -59,7 +58,7 @@ type Logstash struct { Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -72,7 +71,7 @@ func NewLogstash() *Logstash { SinglePipeline: false, Collect: []string{"pipelines", "process", "jvm"}, Headers: make(map[string]string), - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -126,9 +125,11 @@ type Pipeline struct { } type Plugin struct { - ID string `json:"id"` - Events interface{} `json:"events"` - Name string `json:"name"` + ID string `json:"id"` + Events interface{} `json:"events"` + Name string `json:"name"` + BulkRequests map[string]interface{} `json:"bulk_requests"` + Documents map[string]interface{} `json:"documents"` } type PipelinePlugins struct { @@ -138,10 +139,13 @@ type PipelinePlugins struct { } type PipelineQueue struct { - Events float64 `json:"events"` - Type string `json:"type"` - Capacity interface{} `json:"capacity"` - Data interface{} `json:"data"` + Events float64 `json:"events"` + EventsCount *float64 `json:"events_count"` + Type string `json:"type"` + Capacity interface{} `json:"capacity"` + Data interface{} `json:"data"` + QueueSizeInBytes *float64 `json:"queue_size_in_bytes"` + MaxQueueSizeInBytes *float64 `json:"max_queue_size_in_bytes"` } const jvmStats = "/_node/stats/jvm" @@ -149,16 +153,16 @@ const processStats = "/_node/stats/process" const pipelinesStats = "/_node/stats/pipelines" const pipelineStats = "/_node/stats/pipeline" -func (i *Logstash) Init() error { - err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"}) +func (logstash *Logstash) Init() error { + err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"}) if err != nil { return fmt.Errorf(`cannot verify "collect" setting: %v`, err) } return nil } -// createHttpClient create a clients to access API -func (logstash *Logstash) createHttpClient() (*http.Client, error) { +// createHTTPClient create a clients to access API +func (logstash *Logstash) createHTTPClient() (*http.Client, error) { tlsConfig, err := logstash.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -168,15 +172,15 @@ func (logstash *Logstash) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: logstash.Timeout.Duration, + Timeout: time.Duration(logstash.Timeout), } return client, nil } -// gatherJsonData query the data source and parse the response JSON -func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { - 
request, err := http.NewRequest("GET", url, nil) +// gatherJSONData query the data source and parse the response JSON +func (logstash *Logstash) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -201,8 +205,8 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -214,10 +218,10 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { } // gatherJVMStats gather the JVM metrics and add results to the accumulator -func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error { jvmStats := &JVMStats{} - err := logstash.gatherJsonData(url, jvmStats) + err := logstash.gatherJSONData(address, jvmStats) if err != nil { return err } @@ -240,10 +244,10 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu } // gatherJVMStats gather the Process metrics and add results to the accumulator -func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error { processStats := &ProcessStats{} - err := logstash.gatherJsonData(url, processStats) + err := logstash.gatherJSONData(address, processStats) if err != nil { return err } @@ -270,8 +274,8 @@ func (logstash *Logstash) gatherPluginsStats( plugins []Plugin, pluginType string, tags map[string]string, - accumulator telegraf.Accumulator) error { - + accumulator telegraf.Accumulator, +) error { for _, plugin := range plugins { pluginTags := map[string]string{ "plugin_name": plugin.Name, @@ -287,6 +291,63 @@ func (logstash *Logstash) gatherPluginsStats( return err } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + /* + The elasticsearch output produces additional stats around + bulk requests and document writes (that are elasticsearch specific). + Collect those here + */ + if pluginType == "output" && plugin.Name == "elasticsearch" { + /* + The "bulk_requests" section has details about batch writes + into Elasticsearch + + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + */ + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", plugin.BulkRequests) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "bulk_requests") { + continue + } + newKey := fmt.Sprintf("bulk_requests_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + + /* + The "documents" section has counts of individual documents + written/retried/etc. 
+ "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + */ + flattener = jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", plugin.Documents) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "documents") { + continue + } + newKey := fmt.Sprintf("documents_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + } } return nil @@ -295,9 +356,8 @@ func (logstash *Logstash) gatherPluginsStats( func (logstash *Logstash) gatherQueueStats( queue *PipelineQueue, tags map[string]string, - accumulator telegraf.Accumulator) error { - - var err error + accumulator telegraf.Accumulator, +) error { queueTags := map[string]string{ "queue_type": queue.Type, } @@ -305,13 +365,18 @@ func (logstash *Logstash) gatherQueueStats( queueTags[tag] = value } + events := queue.Events + if queue.EventsCount != nil { + events = *queue.EventsCount + } + queueFields := map[string]interface{}{ - "events": queue.Events, + "events": events, } if queue.Type != "memory" { flattener := jsonParser.JSONFlattener{} - err = flattener.FlattenJSON("", queue.Capacity) + err := flattener.FlattenJSON("", queue.Capacity) if err != nil { return err } @@ -322,6 +387,14 @@ func (logstash *Logstash) gatherQueueStats( for field, value := range flattener.Fields { queueFields[field] = value } + + if queue.MaxQueueSizeInBytes != nil { + queueFields["max_queue_size_in_bytes"] = *queue.MaxQueueSizeInBytes + } + + if queue.QueueSizeInBytes != nil { + queueFields["queue_size_in_bytes"] = *queue.QueueSizeInBytes + } } accumulator.AddFields("logstash_queue", queueFields, queueTags) @@ -330,10 +403,10 @@ func (logstash *Logstash) gatherQueueStats( } // gatherJVMStats gather the Pipeline metrics and add results to the accumulator (for Logstash < 6) -func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error { pipelineStats := &PipelineStats{} - err := logstash.gatherJsonData(url, pipelineStats) + err := logstash.gatherJSONData(address, pipelineStats) if err != nil { return err } @@ -374,10 +447,10 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A } // gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6) -func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error { pipelinesStats := &PipelinesStats{} - err := logstash.gatherJsonData(url, pipelinesStats) + err := logstash.gatherJSONData(address, pipelinesStats) if err != nil { return err } @@ -423,7 +496,7 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf. 
// Gather ask this plugin to start gathering metrics func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { if logstash.client == nil { - client, err := logstash.createHttpClient() + client, err := logstash.createHTTPClient() if err != nil { return err @@ -432,40 +505,40 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { } if choice.Contains("jvm", logstash.Collect) { - jvmUrl, err := url.Parse(logstash.URL + jvmStats) + jvmURL, err := url.Parse(logstash.URL + jvmStats) if err != nil { return err } - if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil { + if err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil { return err } } if choice.Contains("process", logstash.Collect) { - processUrl, err := url.Parse(logstash.URL + processStats) + processURL, err := url.Parse(logstash.URL + processStats) if err != nil { return err } - if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil { + if err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil { return err } } if choice.Contains("pipelines", logstash.Collect) { if logstash.SinglePipeline { - pipelineUrl, err := url.Parse(logstash.URL + pipelineStats) + pipelineURL, err := url.Parse(logstash.URL + pipelineStats) if err != nil { return err } - if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil { + if err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil { return err } } else { - pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats) + pipelinesURL, err := url.Parse(logstash.URL + pipelinesStats) if err != nil { return err } - if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil { + if err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil { return err } } diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index aeb4e46f8dbb6..089824c58767f 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var logstashTest = NewLogstash() @@ -16,6 +17,7 @@ var logstashTest = NewLogstash() var ( logstash5accPipelineStats testutil.Accumulator logstash6accPipelinesStats testutil.Accumulator + logstash7accPipelinesStats testutil.Accumulator logstash5accProcessStats testutil.Accumulator logstash6accProcessStats testutil.Accumulator logstash5accJVMStats testutil.Accumulator @@ -25,28 +27,23 @@ var ( func Test_Logstash5GatherProcessStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := 
logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats); err != nil { - test.Logf("Can't gather Process stats") - } + err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats) + require.NoError(test, err, "Can't gather Process stats") logstash5accProcessStats.AssertContainsTaggedFields( test, @@ -74,28 +71,23 @@ func Test_Logstash5GatherProcessStats(test *testing.T) { func Test_Logstash6GatherProcessStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats); err != nil { - test.Logf("Can't gather Process stats") - } + err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats) + require.NoError(test, err, "Can't gather Process stats") logstash6accProcessStats.AssertContainsTaggedFields( test, @@ -124,28 +116,23 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { //logstash5accPipelineStats.SetDebug(true) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats); err != nil { - test.Logf("Can't gather Pipeline stats") - } + err = logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats) + require.NoError(test, err, "Can't gather Pipeline stats") logstash5accPipelineStats.AssertContainsTaggedFields( test, @@ -226,28 +213,23 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { 
//logstash6accPipelinesStats.SetDebug(true) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats); err != nil { - test.Logf("Can't gather Pipeline stats") - } + err = logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats) + require.NoError(test, err, "Can't gather Pipeline stats") fields := make(map[string]interface{}) fields["duration_in_millis"] = float64(8540751.0) @@ -549,34 +531,28 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { "queue_type": string("persisted"), }, ) - } func Test_Logstash5GatherJVMStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats); err != nil { - test.Logf("Can't gather JVM stats") - } + err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats) + require.NoError(test, err, "Can't gather JVM stats") logstash5accJVMStats.AssertContainsTaggedFields( test, @@ -618,34 +594,28 @@ func Test_Logstash5GatherJVMStats(test *testing.T) { "node_version": string("5.3.0"), }, ) - } func Test_Logstash6GatherJVMStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect 
to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { - test.Logf("Can't gather JVM stats") - } + err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats) + require.NoError(test, err, "Can't gather JVM stats") logstash6accJVMStats.AssertContainsTaggedFields( test, @@ -687,5 +657,130 @@ func Test_Logstash6GatherJVMStats(test *testing.T) { "node_version": string("6.4.2"), }, ) +} +func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + _, err := fmt.Fprintf(writer, "%s", string(logstash7PipelinesJSON)) + if err != nil { + test.Logf("Can't print test json") + } + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHTTPClient() + + if err != nil { + test.Logf("Can't createHTTPClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash7accPipelinesStats); err != nil { + test.Logf("Can't gather Pipeline stats") + } + + fields := make(map[string]interface{}) + fields["duration_in_millis"] = float64(3032875.0) + fields["queue_push_duration_in_millis"] = float64(13300.0) + fields["in"] = float64(2665549.0) + fields["filtered"] = float64(2665549.0) + fields["out"] = float64(2665549.0) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_events", + fields, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + }, + ) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2802177.0), + "in": float64(2665549.0), + "out": float64(2665549.0), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "bulk_requests_successes": float64(2870), + "bulk_requests_responses_200": float64(2870), + "bulk_requests_failures": float64(262), + "bulk_requests_with_errors": float64(9089), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": 
string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "documents_successes": float64(2665549), + "documents_retryable_failures": float64(13733), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_queue", + map[string]interface{}{ + "events": float64(0), + "max_queue_size_in_bytes": float64(4294967296), + "queue_size_in_bytes": float64(32028566), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "queue_type": string("persisted"), + }, + ) } diff --git a/plugins/inputs/logstash/samples_logstash7.go b/plugins/inputs/logstash/samples_logstash7.go new file mode 100644 index 0000000000000..e04bb4319a27a --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash7.go @@ -0,0 +1,140 @@ +package logstash + +const logstash7PipelinesJSON = ` +{ + "host" : "HOST01.local", + "version" : "7.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "28580380-ad2c-4032-934b-76359125edca", + "name" : "HOST01.local", + "ephemeral_id" : "bd95ff6b-3fa8-42ae-be32-098a4e4ea1ec", + "status" : "green", + "snapshot" : true, + "pipeline" : { + "workers" : 8, + "batch_size" : 125, + "batch_delay" : 50 + }, + "pipelines" : { + "infra" : { + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 3032875, + "filtered" : 2665549, + "queue_push_duration_in_millis" : 13300 + }, + "plugins" : { + "inputs" : [ { + "id" : "8526dc80bc2257ab08f96018f96b0c68dd03abc5695bb22fb9e96339a8dfb4f86", + "events" : { + "out" : 2665549, + "queue_push_duration_in_millis" : 13300 + }, + "peak_connections" : 1, + "name" : "beats", + "current_connections" : 1 + } ], + "codecs" : [ { + "id" : "plain_7312c097-1e7f-41db-983b-4f5a87a9eba2", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "plain", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + }, { + "id" : "rubydebug_e958e3dc-10f6-4dd6-b7c5-ae3de2892afb", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "rubydebug", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + }, { + "id" : "plain_addb97be-fb77-4cbc-b45c-0424cd5d0ac7", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "plain", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + } ], + "filters" : [ { + "id" : "9e8297a6ee7b61864f77853317dccde83d29952ef869010c385dcfc9064ab8b8", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 8648 + }, + "name" : "date", + "matches" : 2665549 + }, { + "id" : "bec0c77b3f53a78c7878449c72ec59f97be31c1f12f9621f61ed2d4563bad869", + "events" : 
{ + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 195138 + }, + "name" : "fingerprint" + } ], + "outputs" : [ { + "id" : "df59066a933f038354c1845ba44de692f70dbd0d2009ab07a12b98b776be7e3f", + "events" : { + "in" : 0, + "out" : 0, + "duration_in_millis" : 25 + }, + "name" : "stdout" + }, { + "id" : "38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 2802177 + }, + "name" : "elasticsearch", + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + } ] + }, + "reloads" : { + "successes" : 4, + "last_error" : null, + "failures" : 0, + "last_success_timestamp" : "2020-06-05T08:06:12.538Z", + "last_failure_timestamp" : null + }, + "queue" : { + "type" : "persisted", + "events_count" : 0, + "queue_size_in_bytes" : 32028566, + "max_queue_size_in_bytes" : 4294967296 + }, + "hash" : "5bc589ae4b02cb3e436626429b50928b9d99360639c84dc7fc69268ac01a9fd0", + "ephemeral_id" : "4bcacefa-6cbf-461e-b14e-184edd9ebdf3" + } + } +}` diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 611ba294dbc5c..abd5ce87c6bbb 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -1,16 +1,16 @@ -/* -Lustre 2.x telegraf plugin +//go:build !windows +// +build !windows -Lustre (http://lustre.org/) is an open-source, parallel file system -for HPC environments. It stores statistics about its activity in -/proc - -*/ +// Package lustre2 (doesn't aim for Windows) +// Lustre 2.x Telegraf plugin +// Lustre (http://lustre.org/) is an open-source, parallel file system +// for HPC environments. It stores statistics about its activity in /proc package lustre2 import ( - "io/ioutil" + "os" "path/filepath" + "regexp" "strconv" "strings" @@ -25,8 +25,8 @@ type tags struct { // Lustre proc files can change between versions, so we want to future-proof // by letting people choose what to look at. 
type Lustre2 struct { - Ost_procfiles []string `toml:"ost_procfiles"` - Mds_procfiles []string `toml:"mds_procfiles"` + OstProcfiles []string `toml:"ost_procfiles"` + MdsProcfiles []string `toml:"mds_procfiles"` // allFields maps and OST name to the metric fields associated with that OST allFields map[tags]map[string]interface{} @@ -55,10 +55,9 @@ type mapping struct { inProc string // What to look for at the start of a line in /proc/fs/lustre/* field uint32 // which field to extract from that line reportAs string // What measurement name to use - tag string // Additional tag to add for this metric } -var wanted_ost_fields = []*mapping{ +var wantedOstFields = []*mapping{ { inProc: "write_bytes", field: 6, @@ -90,7 +89,7 @@ var wanted_ost_fields = []*mapping{ }, } -var wanted_ost_jobstats_fields = []*mapping{ +var wantedOstJobstatsFields = []*mapping{ { // The read line has several fields, so we need to differentiate what they are inProc: "read", field: 3, @@ -223,7 +222,7 @@ var wanted_ost_jobstats_fields = []*mapping{ }, } -var wanted_mds_fields = []*mapping{ +var wantedMdsFields = []*mapping{ { inProc: "open", }, @@ -274,7 +273,7 @@ var wanted_mds_fields = []*mapping{ }, } -var wanted_mdt_jobstats_fields = []*mapping{ +var wantedMdtJobstatsFields = []*mapping{ { inProc: "open", field: 3, @@ -357,12 +356,14 @@ var wanted_mdt_jobstats_fields = []*mapping{ }, } -func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error { +func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) error { files, err := filepath.Glob(fileglob) if err != nil { return err } + fieldSplitter := regexp.MustCompile(`[ :]+`) + for _, file := range files { /* Turn /proc/fs/lustre/obdfilter//stats and similar * into just the object store target name @@ -373,13 +374,13 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a name := path[len(path)-2] //lines, err := internal.ReadLines(file) - wholeFile, err := ioutil.ReadFile(file) + wholeFile, err := os.ReadFile(file) if err != nil { return err } jobs := strings.Split(string(wholeFile), "- ") for _, job := range jobs { - lines := strings.Split(string(job), "\n") + lines := strings.Split(job, "\n") jobid := "" // figure out if the data should be tagged with job_id here @@ -393,7 +394,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a if len(line) < 1 { continue } - parts := strings.Fields(line) + + parts := fieldSplitter.Split(line, -1) + if len(parts[0]) == 0 { + parts = parts[1:] + } var fields map[string]interface{} fields, ok := l.allFields[tags{name, jobid}] @@ -404,14 +409,14 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a for _, wanted := range wantedFields { var data uint64 - if strings.TrimSuffix(parts[0], ":") == wanted.inProc { + if parts[0] == wanted.inProc { wantedField := wanted.field // if not set, assume field[1]. 
Shouldn't be field[0], as // that's a string if wantedField == 0 { wantedField = 1 } - data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64) + data, err = strconv.ParseUint(strings.TrimSuffix(parts[wantedField], ","), 10, 64) if err != nil { return err } @@ -443,66 +448,60 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error { //l.allFields = make(map[string]map[string]interface{}) l.allFields = make(map[tags]map[string]interface{}) - if len(l.Ost_procfiles) == 0 { + if len(l.OstProcfiles) == 0 { // read/write bytes are in obdfilter//stats - err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", - wanted_ost_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wantedOstFields) if err != nil { return err } // cache counters are in osd-ldiskfs//stats - err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", - wanted_ost_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wantedOstFields) if err != nil { return err } // per job statistics are in obdfilter//job_stats - err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", - wanted_ost_jobstats_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", wantedOstJobstatsFields) if err != nil { return err } } - if len(l.Mds_procfiles) == 0 { + if len(l.MdsProcfiles) == 0 { // Metadata server stats - err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", - wanted_mds_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wantedMdsFields) if err != nil { return err } // Metadata target job stats - err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", - wanted_mdt_jobstats_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", wantedMdtJobstatsFields) if err != nil { return err } } - for _, procfile := range l.Ost_procfiles { - ost_fields := wanted_ost_fields + for _, procfile := range l.OstProcfiles { + ostFields := wantedOstFields if strings.HasSuffix(procfile, "job_stats") { - ost_fields = wanted_ost_jobstats_fields + ostFields = wantedOstJobstatsFields } - err := l.GetLustreProcStats(procfile, ost_fields, acc) + err := l.GetLustreProcStats(procfile, ostFields) if err != nil { return err } } - for _, procfile := range l.Mds_procfiles { - mdt_fields := wanted_mds_fields + for _, procfile := range l.MdsProcfiles { + mdtFields := wantedMdsFields if strings.HasSuffix(procfile, "job_stats") { - mdt_fields = wanted_mdt_jobstats_fields + mdtFields = wantedMdtJobstatsFields } - err := l.GetLustreProcStats(procfile, mdt_fields, acc) + err := l.GetLustreProcStats(procfile, mdtFields) if err != nil { return err } } for tgs, fields := range l.allFields { - tags := map[string]string{ "name": tgs.name, } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 8e93da8e81726..3c5659e18f14f 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -1,15 +1,17 @@ +//go:build !windows +// +build !windows + package lustre2 import ( - "io/ioutil" "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Set config file variables to point to fake directory structure instead of /proc? 
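A note on the splitter change above: GetLustreProcStats now tokenizes each line with a regexp over runs of spaces and colons instead of strings.Fields, so jobstats entries written without a space after the colon (such as the max:16777216 value in the fixture below) still parse. A minimal standalone sketch of that behavior, not part of the patch:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern the plugin now uses: runs of spaces and/or colons all
// count as one separator, so "max:16777216" parses like "max: 16777216".
var fieldSplitter = regexp.MustCompile(`[ :]+`)

func main() {
	line := "  write_bytes: { samples: 25, unit: bytes, min: 1048576, max:16777216, sum: 26214400 }"
	parts := fieldSplitter.Split(line, -1)
	// Leading whitespace yields an empty first token; drop it, mirroring
	// the check added in GetLustreProcStats.
	if len(parts[0]) == 0 {
		parts = parts[1:]
	}
	fmt.Println(parts[0])                          // write_bytes
	fmt.Println(strings.TrimSuffix(parts[9], ",")) // 16777216
}
```

Because the colon is consumed by the splitter, the first token no longer carries a trailing ":", which is why the wanted.inProc comparison above no longer needs strings.TrimSuffix.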
@@ -45,7 +47,7 @@ const obdfilterJobStatsContents = `job_stats: - job_id: cluster-testjob1 snapshot_time: 1461772761 read_bytes: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 } - write_bytes: { samples: 25, unit: bytes, min: 1048576, max: 1048576, sum: 26214400 } + write_bytes: { samples: 25, unit: bytes, min: 1048576, max:16777216, sum: 26214400 } getattr: { samples: 0, unit: reqs } setattr: { samples: 0, unit: reqs } punch: { samples: 1, unit: reqs } @@ -131,35 +133,34 @@ const mdtJobStatsContents = `job_stats: ` func TestLustre2GeneratesMetrics(t *testing.T) { - tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" - ost_name := "OST0001" + ostName := "OST0001" mdtdir := tempdir + "/mdt/" - err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) + err := os.MkdirAll(mdtdir+"/"+ostName, 0755) require.NoError(t, err) osddir := tempdir + "/osd-ldiskfs/" - err = os.MkdirAll(osddir+"/"+ost_name, 0755) + err = os.MkdirAll(osddir+"/"+ostName, 0755) require.NoError(t, err) obddir := tempdir + "/obdfilter/" - err = os.MkdirAll(obddir+"/"+ost_name, 0755) + err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644) + err = os.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) require.NoError(t, err) // Begin by testing standard Lustre stats m := &Lustre2{ - Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, - Mds_procfiles: []string{mdtdir + "/*/md_stats"}, + OstProcfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, + MdsProcfiles: []string{mdtdir + "/*/md_stats"}, } var acc testutil.Accumulator @@ -168,7 +169,7 @@ func TestLustre2GeneratesMetrics(t *testing.T) { require.NoError(t, err) tags := map[string]string{ - "name": ost_name, + "name": ostName, } fields := map[string]interface{}{ @@ -204,29 +205,28 @@ func TestLustre2GeneratesMetrics(t *testing.T) { } func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { - tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" - ost_name := "OST0001" - job_names := []string{"cluster-testjob1", "testjob2"} + ostName := "OST0001" + jobNames := []string{"cluster-testjob1", "testjob2"} mdtdir := tempdir + "/mdt/" - err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) + err := os.MkdirAll(mdtdir+"/"+ostName, 0755) require.NoError(t, err) obddir := tempdir + "/obdfilter/" - err = os.MkdirAll(obddir+"/"+ost_name, 0755) + err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/job_stats", []byte(mdtJobStatsContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ost_name+"/job_stats", []byte(obdfilterJobStatsContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) require.NoError(t, err) // Test Lustre Jobstats m := &Lustre2{ - Ost_procfiles: []string{obddir + "/*/job_stats"}, - Mds_procfiles: []string{mdtdir + "/*/job_stats"}, + OstProcfiles: []string{obddir + "/*/job_stats"}, + MdsProcfiles: 
[]string{mdtdir + "/*/job_stats"}, } var acc testutil.Accumulator @@ -238,12 +238,12 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { // and even further make this dependent on summing per OST tags := []map[string]string{ { - "name": ost_name, - "jobid": job_names[0], + "name": ostName, + "jobid": jobNames[0], }, { - "name": ost_name, - "jobid": job_names[1], + "name": ostName, + "jobid": jobNames[1], }, } @@ -257,7 +257,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { "jobstats_read_bytes": uint64(4096), "jobstats_write_calls": uint64(25), "jobstats_write_min_size": uint64(1048576), - "jobstats_write_max_size": uint64(1048576), + "jobstats_write_max_size": uint64(16777216), "jobstats_write_bytes": uint64(26214400), "jobstats_ost_getattr": uint64(0), "jobstats_ost_setattr": uint64(0), @@ -345,7 +345,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) { "/proc/fs/lustre/mdt/*/md_stats", ]`) - table, err := toml.Parse([]byte(config)) + table, err := toml.Parse(config) require.NoError(t, err) inputs, ok := table.Fields["inputs"] @@ -358,12 +358,12 @@ func TestLustre2CanParseConfiguration(t *testing.T) { require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) - assert.Equal(t, Lustre2{ - Ost_procfiles: []string{ + require.Equal(t, Lustre2{ + OstProcfiles: []string{ "/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats", }, - Mds_procfiles: []string{ + MdsProcfiles: []string{ "/proc/fs/lustre/mdt/*/md_stats", }, }, plugin) diff --git a/plugins/inputs/lustre2/lustre2_windows.go b/plugins/inputs/lustre2/lustre2_windows.go new file mode 100644 index 0000000000000..cd3aea1b534f1 --- /dev/null +++ b/plugins/inputs/lustre2/lustre2_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package lustre2 diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index a40614b1d0f7e..71e7bcea6d535 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,29 +5,30 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" - "log" "net/http" "net/url" "regexp" "sync" "time" + + "github.com/influxdata/telegraf" ) const ( - reports_endpoint string = "/3.0/reports" - reports_endpoint_campaign string = "/3.0/reports/%s" + reportsEndpoint string = "/3.0/reports" + reportsEndpointCampaign string = "/3.0/reports/%s" ) -var mailchimp_datacenter = regexp.MustCompile("[a-z]+[0-9]+$") +var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$") type ChimpAPI struct { Transport http.RoundTripper - Debug bool + debug bool sync.Mutex url *url.URL + log telegraf.Logger } type ReportsParams struct { @@ -54,12 +55,12 @@ func (p *ReportsParams) String() string { return v.Encode() } -func NewChimpAPI(apiKey string) *ChimpAPI { +func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI { u := &url.URL{} u.Scheme = "https" - u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimp_datacenter.FindString(apiKey)) + u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey)) u.User = url.UserPassword("", apiKey) - return &ChimpAPI{url: u} + return &ChimpAPI{url: u, log: log} } type APIError struct { @@ -76,7 +77,9 @@ func (e APIError) Error() string { func chimpErrorCheck(body []byte) error { var e APIError - json.Unmarshal(body, &e) + if err := json.Unmarshal(body, &e); err != nil { + return err + } if e.Title != "" || e.Status != 0 { return e } @@ -86,10 +89,10 @@ func chimpErrorCheck(body []byte) error { func (a *ChimpAPI) GetReports(params 
ReportsParams) (ReportsResponse, error) { a.Lock() defer a.Unlock() - a.url.Path = reports_endpoint + a.url.Path = reportsEndpoint var response ReportsResponse - rawjson, err := runChimp(a, params) + rawjson, err := a.runChimp(params) if err != nil { return response, err } @@ -105,10 +108,10 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { a.Lock() defer a.Unlock() - a.url.Path = fmt.Sprintf(reports_endpoint_campaign, campaignID) + a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID) var response Report - rawjson, err := runChimp(a, ReportsParams{}) + rawjson, err := a.runChimp(ReportsParams{}) if err != nil { return response, err } @@ -121,21 +124,21 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { return response, nil } -func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { +func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) { client := &http.Client{ - Transport: api.Transport, - Timeout: time.Duration(4 * time.Second), + Transport: a.Transport, + Timeout: 4 * time.Second, } var b bytes.Buffer - req, err := http.NewRequest("GET", api.url.String(), &b) + req, err := http.NewRequest("GET", a.url.String(), &b) if err != nil { return nil, err } req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") - if api.Debug { - log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String()) + if a.debug { + a.log.Debugf("request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -146,16 +149,16 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) - return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", a.url.String(), resp.Status, body) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - if api.Debug { - log.Printf("D! 
[inputs.mailchimp] response Body: %q", string(body)) + if a.debug { + a.log.Debugf("response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index d7255191ab724..b898cb6ba1768 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -11,9 +11,11 @@ import ( type MailChimp struct { api *ChimpAPI - ApiKey string - DaysOld int - CampaignId string + APIKey string `toml:"api_key"` + DaysOld int `toml:"days_old"` + CampaignID string `toml:"campaign_id"` + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -35,13 +37,14 @@ func (m *MailChimp) Description() string { return "Gathers metrics from the /3.0/reports MailChimp API" } -func (m *MailChimp) Gather(acc telegraf.Accumulator) error { - if m.api == nil { - m.api = NewChimpAPI(m.ApiKey) - } - m.api.Debug = false +func (m *MailChimp) Init() error { + m.api = NewChimpAPI(m.APIKey, m.Log) - if m.CampaignId == "" { + return nil +} + +func (m *MailChimp) Gather(acc telegraf.Accumulator) error { + if m.CampaignID == "" { since := "" if m.DaysOld > 0 { now := time.Now() @@ -61,7 +64,7 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error { gatherReport(acc, report, now) } } else { - report, err := m.api.GetReport(m.CampaignId) + report, err := m.api.GetReport(m.CampaignID) if err != nil { return err } diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 0c4dab56d5d12..1df6c52cf6256 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,9 +7,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMailChimpGatherReports(t *testing.T) { @@ -17,7 +17,8 @@ func TestMailChimpGatherReports(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleReports) + _, err := fmt.Fprintln(w, sampleReports) + require.NoError(t, err) }, )) defer ts.Close() @@ -27,7 +28,8 @@ func TestMailChimpGatherReports(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -42,22 +44,22 @@ func TestMailChimpGatherReports(t *testing.T) { tags["campaign_title"] = "Freddie's Jokes Vol. 
1" fields := map[string]interface{}{ - "emails_sent": int(200), - "abuse_reports": int(0), - "unsubscribed": int(2), - "hard_bounces": int(0), - "soft_bounces": int(2), - "syntax_errors": int(0), - "forwards_count": int(0), - "forwards_opens": int(0), - "opens_total": int(186), - "unique_opens": int(100), - "clicks_total": int(42), - "unique_clicks": int(400), - "unique_subscriber_clicks": int(42), - "facebook_recipient_likes": int(5), - "facebook_unique_likes": int(8), - "facebook_likes": int(42), + "emails_sent": 200, + "abuse_reports": 0, + "unsubscribed": 2, + "hard_bounces": 0, + "soft_bounces": 2, + "syntax_errors": 0, + "forwards_count": 0, + "forwards_opens": 0, + "opens_total": 186, + "unique_opens": 100, + "clicks_total": 42, + "unique_clicks": 400, + "unique_subscriber_clicks": 42, + "facebook_recipient_likes": 5, + "facebook_unique_likes": 8, + "facebook_likes": 42, "open_rate": float64(42), "click_rate": float64(42), "industry_open_rate": float64(0.17076777144396), @@ -80,7 +82,8 @@ func TestMailChimpGatherReport(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleReport) + _, err := fmt.Fprintln(w, sampleReport) + require.NoError(t, err) }, )) defer ts.Close() @@ -90,11 +93,12 @@ func TestMailChimpGatherReport(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, - CampaignId: "test", + CampaignID: "test", } var acc testutil.Accumulator @@ -137,7 +141,6 @@ func TestMailChimpGatherReport(t *testing.T) { "industry_type": "Social Networks and Online Communities", } acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) - } func TestMailChimpGatherError(t *testing.T) { @@ -145,7 +148,8 @@ func TestMailChimpGatherError(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleError) + _, err := fmt.Fprintln(w, sampleError) + require.NoError(t, err) }, )) defer ts.Close() @@ -155,11 +159,12 @@ func TestMailChimpGatherError(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, - CampaignId: "test", + CampaignID: "test", } var acc testutil.Accumulator diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index b350466122dc7..30f9ee6403074 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -108,7 +108,6 @@ var sampleConfig = ` // Init parse all source URLs and place on the Marklogic struct func (c *Marklogic) Init() error { - if len(c.URL) == 0 { c.URL = "http://localhost:8002/" } @@ -164,9 +163,9 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { return nil } -func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error { +func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, address string) error { ml := &MlHost{} - if err := c.gatherJSONData(url, ml); err != nil { + if err := c.gatherJSONData(address, ml); err != nil { return err } @@ -220,14 +219,14 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: time.Duration(5 * time.Second), + Timeout: 5 * time.Second, } return client, nil } -func (c *Marklogic) gatherJSONData(url string, v interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (c *Marklogic) gatherJSONData(address string, v interface{}) 
error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -246,11 +245,7 @@ func (c *Marklogic) gatherJSONData(url string, v interface{}) error { response.StatusCode, http.StatusOK) } - if err = json.NewDecoder(response.Body).Decode(v); err != nil { - return err - } - - return nil + return json.NewDecoder(response.Body).Decode(v) } func init() { diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go index 34e4bbd6bb7e9..5c39fac19051d 100644 --- a/plugins/inputs/marklogic/marklogic_test.go +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -15,7 +15,8 @@ func TestMarklogic(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() @@ -27,7 +28,7 @@ func TestMarklogic(t *testing.T) { ml := &Marklogic{ Hosts: []string{"example1"}, - URL: string(ts.URL), + URL: ts.URL, //Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"}, } @@ -76,7 +77,6 @@ func TestMarklogic(t *testing.T) { } acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags) - } var response = ` diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index d6303c87758e4..07599ca2cc0b0 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -11,14 +11,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Mcrouter is a mcrouter plugin type Mcrouter struct { Servers []string - Timeout internal.Duration + Timeout config.Duration } // enum for statType @@ -127,11 +127,11 @@ func (m *Mcrouter) Description() string { func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { ctx := context.Background() - if m.Timeout.Duration < 1*time.Second { - m.Timeout.Duration = defaultTimeout + if m.Timeout < config.Duration(1*time.Second) { + m.Timeout = config.Duration(defaultTimeout) } - ctx, cancel := context.WithTimeout(ctx, m.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(m.Timeout)) defer cancel() if len(m.Servers) == 0 { @@ -146,32 +146,33 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } // ParseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) ParseAddress(address string) (string, string, error) { - var protocol string +func (m *Mcrouter) ParseAddress(address string) (parsedAddress string, protocol string, err error) { var host string var port string - u, parseError := url.Parse(address) + parsedAddress = address + + u, parseError := url.Parse(parsedAddress) if parseError != nil { - return "", "", fmt.Errorf("Invalid server address") + return "", "", fmt.Errorf("invalid server address") } if u.Scheme != "tcp" && u.Scheme != "unix" { - return "", "", fmt.Errorf("Invalid server protocol") + return "", "", fmt.Errorf("invalid server protocol") } protocol = u.Scheme if protocol == "unix" { if u.Path == "" { - return "", "", fmt.Errorf("Invalid unix socket path") + return "", "", fmt.Errorf("invalid unix socket path") } - address = u.Path + parsedAddress = u.Path } else { if u.Host == "" { - return "", "", fmt.Errorf("Invalid host") + return "", "", fmt.Errorf("invalid host") } host = 
u.Hostname() @@ -185,10 +186,10 @@ func (m *Mcrouter) ParseAddress(address string) (string, string, error) { port = defaultServerURL.Port() } - address = host + ":" + port + parsedAddress = host + ":" + port } - return address, protocol, nil + return parsedAddress, protocol, nil } func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { @@ -213,7 +214,9 @@ func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegra deadline, ok := ctx.Deadline() if ok { - conn.SetDeadline(deadline) + if err := conn.SetDeadline(deadline); err != nil { + return err + } } // Read and write buffer diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index e17c13b6d6655..f02f2b53d4b85 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestAddressParsing(t *testing.T) { @@ -30,21 +30,21 @@ func TestAddressParsing(t *testing.T) { for _, args := range acceptTests { address, protocol, err := m.ParseAddress(args[0]) - assert.Nil(t, err, args[0]) - assert.True(t, address == args[1], args[0]) - assert.True(t, protocol == args[2], args[0]) + require.Nil(t, err, args[0]) + require.Equal(t, args[1], address, args[0]) + require.Equal(t, args[2], protocol, args[0]) } for _, addr := range rejectTests { address, protocol, err := m.ParseAddress(addr) - assert.NotNil(t, err, addr) - assert.Empty(t, address, addr) - assert.Empty(t, protocol, addr) + require.NotNil(t, err, addr) + require.Empty(t, address, addr) + require.Empty(t, protocol, addr) } } -func TestMcrouterGeneratesMetrics(t *testing.T) { +func TestMcrouterGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -58,33 +58,82 @@ func TestMcrouterGeneratesMetrics(t *testing.T) { err := acc.GatherError(m.Gather) require.NoError(t, err) - intMetrics := []string{"uptime", "num_servers", "num_servers_new", "num_servers_up", - "num_servers_down", "num_servers_closed", "num_clients", - "num_suspect_servers", "destination_batches_sum", "destination_requests_sum", - "outstanding_route_get_reqs_queued", "outstanding_route_update_reqs_queued", - "outstanding_route_get_avg_queue_size", "outstanding_route_update_avg_queue_size", - "outstanding_route_get_avg_wait_time_sec", "outstanding_route_update_avg_wait_time_sec", - "retrans_closed_connections", "destination_pending_reqs", "destination_inflight_reqs", - "destination_batch_size", "asynclog_requests", "proxy_reqs_processing", - "proxy_reqs_waiting", "client_queue_notify_period", - "ps_num_minor_faults", "ps_num_major_faults", - "ps_vsize", "ps_rss", "fibers_allocated", "fibers_pool_size", "fibers_stack_high_watermark", - "successful_client_connections", "duration_us", "destination_max_pending_reqs", - "destination_max_inflight_reqs", "retrans_per_kbyte_max", "cmd_get_count", "cmd_delete_out", - "cmd_lease_get", "cmd_set", "cmd_get_out_all", "cmd_get_out", "cmd_lease_set_count", - "cmd_other_out_all", "cmd_lease_get_out", "cmd_set_count", "cmd_lease_set_out", - "cmd_delete_count", "cmd_other", "cmd_delete", "cmd_get", "cmd_lease_set", "cmd_set_out", - "cmd_lease_get_count", "cmd_other_out", "cmd_lease_get_out_all", "cmd_set_out_all", - "cmd_other_count", "cmd_delete_out_all", 
"cmd_lease_set_out_all"} - - floatMetrics := []string{"rusage_system", "rusage_user", "ps_user_time_sec", "ps_system_time_sec"} + intMetrics := []string{ + "uptime", + // "num_servers", + // "num_servers_new", + // "num_servers_up", + // "num_servers_down", + // "num_servers_closed", + // "num_clients", + // "num_suspect_servers", + // "destination_batches_sum", + // "destination_requests_sum", + // "outstanding_route_get_reqs_queued", + // "outstanding_route_update_reqs_queued", + // "outstanding_route_get_avg_queue_size", + // "outstanding_route_update_avg_queue_size", + // "outstanding_route_get_avg_wait_time_sec", + // "outstanding_route_update_avg_wait_time_sec", + // "retrans_closed_connections", + // "destination_pending_reqs", + // "destination_inflight_reqs", + // "destination_batch_size", + // "asynclog_requests", + // "proxy_reqs_processing", + // "proxy_reqs_waiting", + // "client_queue_notify_period", + // "ps_num_minor_faults", + // "ps_num_major_faults", + // "ps_vsize", + // "ps_rss", + // "fibers_allocated", + // "fibers_pool_size", + // "fibers_stack_high_watermark", + // "successful_client_connections", + // "duration_us", + // "destination_max_pending_reqs", + // "destination_max_inflight_reqs", + // "retrans_per_kbyte_max", + // "cmd_get_count", + // "cmd_delete_out", + // "cmd_lease_get", + "cmd_set", + // "cmd_get_out_all", + // "cmd_get_out", + // "cmd_lease_set_count", + // "cmd_other_out_all", + // "cmd_lease_get_out", + // "cmd_set_count", + // "cmd_lease_set_out", + // "cmd_delete_count", + // "cmd_other", + // "cmd_delete", + "cmd_get", + // "cmd_lease_set", + // "cmd_set_out", + // "cmd_lease_get_count", + // "cmd_other_out", + // "cmd_lease_get_out_all", + // "cmd_set_out_all", + // "cmd_other_count", + // "cmd_delete_out_all", + // "cmd_lease_set_out_all" + } + + floatMetrics := []string{ + "rusage_system", + "rusage_user", + // "ps_user_time_sec", + // "ps_system_time_sec", + } for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("mcrouter", metric), metric) + require.True(t, acc.HasInt64Field("mcrouter", metric), metric) } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("mcrouter", metric), metric) + require.True(t, acc.HasFloatField("mcrouter", metric), metric) } } diff --git a/plugins/inputs/mdstat/README.md b/plugins/inputs/mdstat/README.md new file mode 100644 index 0000000000000..6180833b69ade --- /dev/null +++ b/plugins/inputs/mdstat/README.md @@ -0,0 +1,49 @@ +# mdstat Input Plugin + +The mdstat plugin gathers statistics about any Linux MD RAID arrays configured on the host +by reading /proc/mdstat. For a full list of available fields see the +/proc/mdstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). +For a better idea of what each field represents, see the +[mdstat man page](https://raid.wiki.kernel.org/index.php/Mdstat). 
+ +Stat collection is based on Prometheus' mdstat collection library at https://github.com/prometheus/procfs/blob/master/mdstat.go + + +### Configuration: + +```toml +# Get kernel statistics from /proc/mdstat +[[inputs.mdstat]] + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +``` + +### Measurements & Fields: + +- mdstat + - BlocksSynced (if the array is rebuilding/checking, this is the count of blocks that have been scanned) + - BlocksSyncedFinishTime (the expected finish time of the rebuild scan, listed in minutes remaining) + - BlocksSyncedPct (the percentage of the rebuild scan left) + - BlocksSyncedSpeed (the current speed the rebuild is running at, listed in K/sec) + - BlocksTotal (the total count of blocks in the array) + - DisksActive (the number of disks that are currently considered healthy in the array) + - DisksDown (the count of disks in the array that are currently down) + - DisksFailed (the current count of failed disks in the array) + - DisksSpare (the current count of "spare" disks in the array) + - DisksTotal (total count of disks in the array) + +### Tags: + +- mdstat + - ActivityState (`active` or `inactive`) + - Devices (comma-separated list of devices that make up the array) + - Name (name of the array) + +### Example Output: + +``` +$ telegraf --config ~/ws/telegraf.conf --input-filter mdstat --test +* Plugin: mdstat, Collection 1 +> mdstat,ActivityState=active,Devices=sdm1\,sdn1,Name=md1 BlocksSynced=231299072i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=231299072i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +> mdstat,ActivityState=active,Devices=sdm5\,sdn5,Name=md2 BlocksSynced=2996224i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=2996224i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +``` diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go new file mode 100644 index 0000000000000..3f6fee7d086ca --- /dev/null +++ b/plugins/inputs/mdstat/mdstat.go @@ -0,0 +1,313 @@ +//go:build linux +// +build linux + +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code has been changed since initial import. 
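To make the regexp-driven parsing in mdstat.go below more concrete, here is a minimal standalone sketch (not part of the patch) of what statusLineRE extracts from a /proc/mdstat status line; the sample line comes from the failed-disk fixture in the tests further down:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as statusLineRE below: capture the block count, the
// total/active disk counts, and the per-disk up/down ("U"/"_") flags.
var statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)

func main() {
	line := "5860144128 blocks super 1.2 level 5, 64k chunk, algorithm 2 [4/3] [UUU_]"
	m := statusLineRE.FindStringSubmatch(line)
	fmt.Println(m[1]) // 5860144128 -> BlocksTotal
	fmt.Println(m[2]) // 4          -> DisksTotal
	fmt.Println(m[3]) // 3          -> DisksActive
	fmt.Println(m[4]) // UUU_       -> one "_", so DisksDown is 1
}
```

evalStatusLine additionally takes the block count from the first whitespace-separated field of the line, which lets it handle raid0/linear and inactive arrays whose status lines don't match this pattern.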
+ +package mdstat + +import ( + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + defaultHostProc = "/proc" + envProc = "HOST_PROC" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +type statusLine struct { + active int64 + total int64 + size int64 + down int64 +} + +type recoveryLine struct { + syncedBlocks int64 + pct float64 + finish float64 + speed float64 +} + +type MdstatConf struct { + FileName string `toml:"file_name"` +} + +func (k *MdstatConf) Description() string { + return "Get md array statistics from /proc/mdstat" +} + +var mdSampleConfig = ` + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +` + +func (k *MdstatConf) SampleConfig() string { + return mdSampleConfig +} + +func evalStatusLine(deviceLine, statusLineStr string) (statusLine, error) { + sizeFields := strings.Fields(statusLineStr) + if len(sizeFields) < 1 { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("statusLine empty? %q", statusLineStr) + } + sizeStr := sizeFields[0] + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. + total := int64(strings.Count(deviceLine, "[")) + return statusLine{active: total, total: total, down: 0, size: size}, nil + } + + if strings.Contains(deviceLine, "inactive") { + return statusLine{active: 0, total: 0, down: 0, size: size}, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLineStr) + if len(matches) != 5 { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("couldn't find all the substring matches: %s", statusLineStr) + } + total, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + active, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return statusLine{active: 0, total: total, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + down := int64(strings.Count(matches[4], "_")) + + return statusLine{active: active, total: total, size: size, down: down}, nil +} + +func evalRecoveryLine(recoveryLineStr string) (recoveryLine, error) { + // Get count of completed vs. 
total blocks + matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching syncedBlocks: %s", recoveryLineStr) + } + syncedBlocks, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get percentage complete + matches = recoveryLinePctRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLineStr) + } + pct, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get time expected left to complete + matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLineStr) + } + finish, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get recovery speed + matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLineStr) + } + speed, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: speed}, nil +} + +func evalComponentDevices(deviceFields []string) string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + // Ensure no churn on tag ordering change + sort.Strings(mdComponentDevices) + return strings.Join(mdComponentDevices, ",") +} + +func (k *MdstatConf) Gather(acc telegraf.Accumulator) error { + data, err := k.getProcMdstat() + if err != nil { + return err + } + lines := strings.Split(string(data), "\n") + // empty file should return nothing + if len(lines) < 3 { + return nil + } + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { + continue + } + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 || len(lines) <= i+3 { + return fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + /* + Failed disks have the suffix (F) & Spare disks have the suffix (S). + Failed disks may also not be marked separately... 
+ */ + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + + sts, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { + return fmt.Errorf("error parsing md device lines: %w", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + var rcvry recoveryLine + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + rcvry.syncedBlocks = sts.size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { + rcvry.syncedBlocks = 0 + } else { + var err error + rcvry, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + } + } + } + fields := map[string]interface{}{ + "DisksActive": sts.active, + "DisksFailed": fail, + "DisksSpare": spare, + "DisksTotal": sts.total, + "DisksDown": sts.down, + "BlocksTotal": sts.size, + "BlocksSynced": rcvry.syncedBlocks, + "BlocksSyncedPct": rcvry.pct, + "BlocksSyncedFinishTime": rcvry.finish, + "BlocksSyncedSpeed": rcvry.speed, + } + tags := map[string]string{ + "Name": mdName, + "ActivityState": state, + "Devices": evalComponentDevices(deviceFields), + } + acc.AddFields("mdstat", fields, tags) + } + + return nil +} + +func (k *MdstatConf) getProcMdstat() ([]byte, error) { + var mdStatFile string + if k.FileName == "" { + mdStatFile = proc(envProc, defaultHostProc) + "/mdstat" + } else { + mdStatFile = k.FileName + } + if _, err := os.Stat(mdStatFile); os.IsNotExist(err) { + return nil, fmt.Errorf("mdstat: %s does not exist", mdStatFile) + } else if err != nil { + return nil, err + } + + data, err := os.ReadFile(mdStatFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func init() { + inputs.Add("mdstat", func() telegraf.Input { return &MdstatConf{} }) +} + +// proc can be used to read file paths from env +func proc(env, path string) string { + // try to read full file path + if p := os.Getenv(env); p != "" { + return p + } + // return default path + return path +} diff --git a/plugins/inputs/mdstat/mdstat_notlinux.go b/plugins/inputs/mdstat/mdstat_notlinux.go new file mode 100644 index 0000000000000..409ae776102b0 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package mdstat diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go new file mode 100644 index 0000000000000..27397f715ad0d --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -0,0 +1,149 @@ +//go:build linux +// +build linux + +package mdstat + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestFullMdstatProcFile(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFull)) + defer os.Remove(filename) + k := MdstatConf{ + FileName: filename, + } + acc := testutil.Accumulator{} + err := k.Gather(&acc) + 
require.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(10620027200), + "BlocksSyncedFinishTime": float64(101.6), + "BlocksSyncedPct": float64(94.3), + "BlocksSyncedSpeed": float64(103517), + "BlocksTotal": int64(11251451904), + "DisksActive": int64(12), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(12), + "DisksDown": int64(0), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestFailedDiskMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFailedDisk)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(5860144128), + "BlocksSyncedFinishTime": float64(0), + "BlocksSyncedPct": float64(0), + "BlocksSyncedSpeed": float64(0), + "BlocksTotal": int64(5860144128), + "DisksActive": int64(3), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(4), + "DisksDown": int64(1), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestEmptyMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileEmpty)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.NoError(t, err) +} + +func TestInvalidMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileInvalid)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.Error(t, err) +} + +const mdStatFileFull = ` +Personalities : [raid1] [raid10] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] +md2 : active raid10 sde[2] sdl[9] sdf[3] sdk[8] sdh[5] sdd[1] sdg[4] sdn[11] sdm[10] sdj[7] sdc[0] sdi[6] + 11251451904 blocks super 1.2 512K chunks 2 near-copies [12/12] [UUUUUUUUUUUU] + [==================>..] 
check = 94.3% (10620027200/11251451904) finish=101.6min speed=103517K/sec + bitmap: 35/84 pages [140KB], 65536KB chunk + +md1 : active raid1 sdb2[2] sda2[0] + 5909504 blocks super 1.2 [2/2] [UU] + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: <none> +` + +const mdStatFileFailedDisk = ` +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md0 : active raid5 sdd1[3] sdb1[1] sda1[0] + 5860144128 blocks super 1.2 level 5, 64k chunk, algorithm 2 [4/3] [UUU_] + bitmap: 8/15 pages [32KB], 65536KB chunk + +unused devices: <none> +` + +const mdStatFileEmpty = ` +Personalities : +unused devices: <none> +` + +const mdStatFileInvalid = ` +Personalities : + +mdf1: testman actve + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: <none> +` + +func makeFakeMDStatFile(content []byte) (filename string) { + fileobj, err := os.CreateTemp("", "mdstat") + if err != nil { + panic(err) + } + + if _, err = fileobj.Write(content); err != nil { + panic(err) + } + if err := fileobj.Close(); err != nil { + panic(err) + } + return fileobj.Name() +} diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index c8dbd0c2a43b5..d01bf2a0fa156 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -14,19 +14,19 @@ type MemStats struct { platform string } -func (_ *MemStats) Description() string { +func (ms *MemStats) Description() string { return "Read metrics about memory usage" } -func (_ *MemStats) SampleConfig() string { return "" } +func (ms *MemStats) SampleConfig() string { return "" } -func (m *MemStats) Init() error { - m.platform = runtime.GOOS +func (ms *MemStats) Init() error { + ms.platform = runtime.GOOS return nil } -func (s *MemStats) Gather(acc telegraf.Accumulator) error { - vm, err := s.ps.VMStat() +func (ms *MemStats) Gather(acc telegraf.Accumulator) error { + vm, err := ms.ps.VMStat() if err != nil { return fmt.Errorf("error getting virtual memory info: %s", err) } @@ -39,7 +39,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "available_percent": 100 * float64(vm.Available) / float64(vm.Total), } - switch s.platform { + switch ms.platform { case "darwin": fields["active"] = vm.Active fields["free"] = vm.Free diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 99128263ade10..eefb3f85441ea 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -129,7 +129,9 @@ func (m *Memcached) gatherServer( } // Extend connection - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 3c8a239f06d73..1ebfe65bad6fb 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -5,12 +5,12 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) -func TestMemcachedGeneratesMetrics(t *testing.T) { +func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") 
} @@ -32,7 +32,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("memcached", metric), metric) + require.True(t, acc.HasInt64Field("memcached", metric), metric) } } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index acc836cba34bb..991f8a9fd7003 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -3,8 +3,7 @@ package mesos import ( "encoding/json" "errors" - "io/ioutil" - "log" + "io" "net" "net/http" "net/url" @@ -23,7 +22,7 @@ type Role string const ( MASTER Role = "master" - SLAVE = "slave" + SLAVE Role = "slave" ) type Mesos struct { @@ -100,7 +99,7 @@ func (m *Mesos) Description() string { return "Telegraf plugin for gathering metrics from N Mesos masters" } -func parseURL(s string, role Role) (*url.URL, error) { +func (m *Mesos) parseURL(s string, role Role) (*url.URL, error) { if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") { host, port, err := net.SplitHostPort(s) // no port specified @@ -115,7 +114,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) + m.Log.Warnf("using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -139,7 +138,7 @@ func (m *Mesos) initialize() error { m.masterURLs = make([]*url.URL, 0, len(m.Masters)) for _, master := range m.Masters { - u, err := parseURL(master, MASTER) + u, err := m.parseURL(master, MASTER) if err != nil { return err } @@ -150,7 +149,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = make([]*url.URL, 0, len(m.Slaves)) for _, slave := range m.Slaves { - u, err := parseURL(slave, SLAVE) + u, err := m.parseURL(slave, SLAVE) if err != nil { return err } @@ -159,7 +158,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = append(m.slaveURLs, u) } - client, err := m.createHttpClient() + client, err := m.createHTTPClient() if err != nil { return err } @@ -185,7 +184,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { go func(master *url.URL) { acc.AddError(m.gatherMainMetrics(master, MASTER, acc)) wg.Done() - return }(master) } @@ -194,7 +192,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { go func(slave *url.URL) { acc.AddError(m.gatherMainMetrics(slave, SLAVE, acc)) wg.Done() - return }(slave) } @@ -203,7 +200,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { return nil } -func (m *Mesos) createHttpClient() (*http.Client, error) { +func (m *Mesos) createHTTPClient() (*http.Client, error) { tlsCfg, err := m.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -243,13 +240,11 @@ func metricsDiff(role Role, w []string) []string { } // masterBlocks serves as kind of metrics registry grouping them in sets -func getMetrics(role Role, group string) []string { - var m map[string][]string - - m = make(map[string][]string) +func (m *Mesos) getMetrics(role Role, group string) []string { + metrics := make(map[string][]string) if role == MASTER { - m["resources"] = []string{ + metrics["resources"] = []string{ "master/cpus_percent", "master/cpus_used", "master/cpus_total", @@ -276,12 +271,12 @@ func getMetrics(role Role, group string) []string { "master/mem_revocable_used", } - m["master"] = []string{ + metrics["master"] = []string{ "master/elected", "master/uptime_secs", } - m["system"] = 
[]string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -290,7 +285,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["agents"] = []string{ + metrics["agents"] = []string{ "master/slave_registrations", "master/slave_removals", "master/slave_reregistrations", @@ -307,7 +302,7 @@ func getMetrics(role Role, group string) []string { "master/slaves_unreachable", } - m["frameworks"] = []string{ + metrics["frameworks"] = []string{ "master/frameworks_active", "master/frameworks_connected", "master/frameworks_disconnected", @@ -318,10 +313,10 @@ func getMetrics(role Role, group string) []string { // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. // These empty groups are included to prevent the "unknown metrics group" info log below. // filterMetrics() filters these metrics by looking for names with the corresponding prefix. - m["framework_offers"] = []string{} - m["allocator"] = []string{} + metrics["framework_offers"] = []string{} + metrics["allocator"] = []string{} - m["tasks"] = []string{ + metrics["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", "master/tasks_finished", @@ -337,7 +332,7 @@ func getMetrics(role Role, group string) []string { "master/tasks_unreachable", } - m["messages"] = []string{ + metrics["messages"] = []string{ "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", "master/invalid_status_update_acknowledgements", @@ -381,14 +376,14 @@ func getMetrics(role Role, group string) []string { "master/valid_operation_status_update_acknowledgements", } - m["evqueue"] = []string{ + metrics["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", "master/operator_event_stream_subscribers", } - m["registrar"] = []string{ + metrics["registrar"] = []string{ "registrar/state_fetch_ms", "registrar/state_store_ms", "registrar/state_store_ms/max", @@ -406,7 +401,7 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/count", } } else if role == SLAVE { - m["resources"] = []string{ + metrics["resources"] = []string{ "slave/cpus_percent", "slave/cpus_used", "slave/cpus_total", @@ -433,12 +428,12 @@ func getMetrics(role Role, group string) []string { "slave/mem_revocable_used", } - m["agent"] = []string{ + metrics["agent"] = []string{ "slave/registered", "slave/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -447,7 +442,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["executors"] = []string{ + metrics["executors"] = []string{ "containerizer/mesos/container_destroy_errors", "slave/container_launch_errors", "slave/executors_preempted", @@ -460,7 +455,7 @@ func getMetrics(role Role, group string) []string { "slave/recovery_errors", } - m["tasks"] = []string{ + metrics["tasks"] = []string{ "slave/tasks_failed", "slave/tasks_finished", "slave/tasks_killed", @@ -470,7 +465,7 @@ func getMetrics(role Role, group string) []string { "slave/tasks_starting", } - m["messages"] = []string{ + metrics["messages"] = []string{ "slave/invalid_framework_messages", "slave/invalid_status_updates", "slave/valid_framework_messages", @@ -478,10 +473,10 @@ func getMetrics(role Role, group string) []string { } } - ret, ok := m[group] + ret, ok := metrics[group] if !ok { - log.Printf("I! 
[inputs.mesos] unknown role %q metrics group: %s", role, group) + m.Log.Infof("unknown role %q metrics group: %s", role, group) return []string{} } @@ -504,21 +499,21 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { case "allocator": for m := range *metrics { if strings.HasPrefix(m, "allocator/") { - delete((*metrics), m) + delete(*metrics, m) } } case "framework_offers": for m := range *metrics { if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") { - delete((*metrics), m) + delete(*metrics, m) } } // All other metrics have predictable names. We can use getMetrics() to retrieve them. default: - for _, v := range getMetrics(role, k) { + for _, v := range m.getMetrics(role, k) { if _, ok = (*metrics)[v]; ok { - delete((*metrics), v) + delete(*metrics, v) } } } @@ -532,49 +527,6 @@ type TaskStats struct { Statistics map[string]interface{} `json:"statistics"` } -func (m *Mesos) gatherSlaveTaskMetrics(u *url.URL, acc telegraf.Accumulator) error { - var metrics []TaskStats - - tags := map[string]string{ - "server": u.Hostname(), - "url": urlTag(u), - } - - resp, err := m.client.Get(withPath(u, "/monitor/statistics").String()) - - if err != nil { - return err - } - - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - if err = json.Unmarshal([]byte(data), &metrics); err != nil { - return errors.New("Error decoding JSON response") - } - - for _, task := range metrics { - tags["framework_id"] = task.FrameworkID - - jf := jsonparser.JSONFlattener{} - err = jf.FlattenJSON("", task.Statistics) - - if err != nil { - return err - } - - timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0) - jf.Fields["executor_id"] = task.ExecutorID - - acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp) - } - - return nil -} - func withPath(u *url.URL, path string) *url.URL { c := *u c.Path = path @@ -605,14 +557,16 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato return err } - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) + // Ignore the returned error to not shadow the initial one + //nolint:errcheck,revive resp.Body.Close() if err != nil { return err } - if err = json.Unmarshal([]byte(data), &jsonOut); err != nil { - return errors.New("Error decoding JSON response") + if err = json.Unmarshal(data, &jsonOut); err != nil { + return errors.New("error decoding JSON response") } m.filterMetrics(role, &jsonOut) diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index e25f250c8f8d4..2605ddd4678c2 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -2,7 +2,6 @@ package mesos import ( "encoding/json" - "fmt" "math/rand" "net/http" "net/http/httptest" @@ -11,25 +10,19 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var masterMetrics map[string]interface{} var masterTestServer *httptest.Server var slaveMetrics map[string]interface{} -// var slaveTaskMetrics map[string]interface{} var slaveTestServer *httptest.Server -func randUUID() string { - b := make([]byte, 16) - rand.Read(b) - return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) - } - // master metrics that will be returned by generateMetrics() -var masterMetricNames []string = []string{ +var masterMetricNames = []string{ // resources "master/cpus_percent", "master/cpus_used", @@ -214,7 +207,7 @@ var masterMetricNames []string = []string{ } // slave metrics that will be returned by generateMetrics() -var slaveMetricNames []string = []string{ +var slaveMetricNames = []string{ // resources "slave/cpus_percent", "slave/cpus_used", @@ -286,32 +279,6 @@ func generateMetrics() { for _, k := range slaveMetricNames { slaveMetrics[k] = rand.Float64() } - - // slaveTaskMetrics = map[string]interface{}{ - // "executor_id": fmt.Sprintf("task_name.%s", randUUID()), - // "executor_name": "Some task description", - // "framework_id": randUUID(), - // "source": fmt.Sprintf("task_source.%s", randUUID()), - // "statistics": map[string]interface{}{ - // "cpus_limit": rand.Float64(), - // "cpus_system_time_secs": rand.Float64(), - // "cpus_user_time_secs": rand.Float64(), - // "mem_anon_bytes": float64(rand.Int63()), - // "mem_cache_bytes": float64(rand.Int63()), - // "mem_critical_pressure_counter": float64(rand.Int63()), - // "mem_file_bytes": float64(rand.Int63()), - // "mem_limit_bytes": float64(rand.Int63()), - // "mem_low_pressure_counter": float64(rand.Int63()), - // "mem_mapped_file_bytes": float64(rand.Int63()), - // "mem_medium_pressure_counter": float64(rand.Int63()), - // "mem_rss_bytes": float64(rand.Int63()), - // "mem_swap_bytes": float64(rand.Int63()), - // "mem_total_bytes": float64(rand.Int63()), - // "mem_total_memsw_bytes": float64(rand.Int63()), - // "mem_unevictable_bytes": float64(rand.Int63()), - // "timestamp": rand.Float64(), - // }, - // } } func TestMain(m *testing.M) { @@ -321,6 +288,8 @@ func TestMain(m *testing.M) { masterRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive json.NewEncoder(w).Encode(masterMetrics) }) masterTestServer = httptest.NewServer(masterRouter) @@ -329,13 +298,10 @@ func TestMain(m *testing.M) { slaveRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive json.NewEncoder(w).Encode(slaveMetrics) }) - // slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) { - // w.WriteHeader(http.StatusOK) - // w.Header().Set("Content-Type", "application/json") - // json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics}) - // }) slaveTestServer = httptest.NewServer(slaveRouter) rc := m.Run() @@ -354,11 +320,7 @@ func TestMesosMaster(t *testing.T) { Timeout: 10, } - err := acc.GatherError(m.Gather) - - if err != nil { - t.Errorf(err.Error()) - } + require.NoError(t, acc.GatherError(m.Gather)) acc.AssertContainsFields(t, "mesos", masterMetrics) } @@ -379,10 +341,9 @@ func TestMasterFilter(t *testing.T) { // Assert expected metrics are present. for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + for _, x := range m.getMetrics(MASTER, v) { + _, ok := masterMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should be present.", x) } } // m.MasterCols includes "allocator", so allocator metrics should be present.
@@ -390,18 +351,16 @@ // getMetrics(). We have to find them by checking name prefixes. for _, x := range masterMetricNames { if strings.HasPrefix(x, "allocator/") { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should be present.", x) - } + _, ok := masterMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should be present.", x) } } // Assert unexpected metrics are not present. for _, v := range b { - for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; ok { - t.Errorf("Found key %s, it should be gone.", x) - } + for _, x := range m.getMetrics(MASTER, v) { + _, ok := masterMetrics[x] + require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } // m.MasterCols does not include "framework_offers", so framework_offers metrics should not be present. @@ -409,7 +368,7 @@ // getMetrics(). We have to find them by checking name prefixes. for k := range masterMetrics { if strings.HasPrefix(k, "master/frameworks/") || strings.HasPrefix(k, "frameworks/") { - t.Errorf("Found key %s, it should be gone.", k) + require.Failf(t, "unexpected key", "Found key %s, it should be gone.", k) } } } @@ -425,11 +384,7 @@ func TestMesosSlave(t *testing.T) { Timeout: 10, } - err := acc.GatherError(m.Gather) - - if err != nil { - t.Errorf(err.Error()) - } + require.NoError(t, acc.GatherError(m.Gather)) acc.AssertContainsFields(t, "mesos", slaveMetrics) } @@ -448,17 +403,15 @@ func TestSlaveFilter(t *testing.T) { m.filterMetrics(SLAVE, &slaveMetrics) for _, v := range b { - for _, x := range getMetrics(SLAVE, v) { - if _, ok := slaveMetrics[x]; ok { - t.Errorf("Found key %s, it should be gone.", x) - } + for _, x := range m.getMetrics(SLAVE, v) { + _, ok := slaveMetrics[x] + require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } for _, v := range m.MasterCols { - for _, x := range getMetrics(SLAVE, v) { - if _, ok := slaveMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + for _, x := range m.getMetrics(SLAVE, v) { + _, ok := slaveMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should be present.", x) } } } diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 30f56213af345..4aa712d4b04f4 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -25,12 +25,12 @@ type Connector interface { Connect() (Connection, error) } -func NewConnector(hostname, port, password string) (*connector, error) { +func newConnector(hostname, port, password string) *connector { return &connector{ hostname: hostname, port: port, password: password, - }, nil + } } type connector struct { @@ -45,21 +45,21 @@ func (c *connector) Connect() (Connection, error) { return nil, err } - rcon, err := rcon.NewClient(c.hostname, p) + client, err := rcon.NewClient(c.hostname, p) if err != nil { return nil, err } - _, err = rcon.Authorize(c.password) + _, err = client.Authorize(c.password) if err != nil { return nil, err } - return &connection{rcon: rcon}, nil + return &connection{client: client}, nil } -func NewClient(connector Connector) (*client, error) { - return &client{connector: connector}, nil +func newClient(connector Connector) *client { + return &client{connector: connector} } type client struct { @@ -90,13 +90,7 @@ func (c *client) Players() ([]string, error) { return nil, err } - players, err := parsePlayers(resp) - if err != nil { - c.conn = nil - return nil, err - } - - return players, nil + return parsePlayers(resp), nil } func (c *client) Scores(player 
string) ([]Score, error) { return nil, err } - scores, err := parseScores(resp) - if err != nil { - c.conn = nil - return nil, err - } - - return scores, nil + return parseScores(resp), nil } type connection struct { - rcon *rcon.Client + client *rcon.Client } func (c *connection) Execute(command string) (string, error) { - packet, err := c.rcon.Execute(command) + packet, err := c.client.Execute(command) if err != nil { return "", err } return packet.Body, nil } -func parsePlayers(input string) ([]string, error) { +func parsePlayers(input string) []string { parts := strings.SplitAfterN(input, ":", 2) if len(parts) != 2 { - return []string{}, nil + return []string{} } names := strings.Split(parts[1], ",") @@ -157,9 +145,8 @@ func parsePlayers(input string) ([]string, error) { continue } players = append(players, name) - } - return players, nil + return players } // Score is an individual tracked scoreboard stat. @@ -168,9 +155,9 @@ type Score struct { Value int64 } -func parseScores(input string) ([]Score, error) { +func parseScores(input string) []Score { if strings.Contains(input, "has no scores") { - return []Score{}, nil + return []Score{} } // Detect Minecraft <= 1.12 @@ -201,5 +188,6 @@ func parseScores(input string) ([]Score, error) { } scores = append(scores, score) } - return scores, nil + + return scores } diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go index 767a0c30ef5d3..59db9bf34a8d6 100644 --- a/plugins/inputs/minecraft/client_test.go +++ b/plugins/inputs/minecraft/client_test.go @@ -98,9 +98,7 @@ func TestClient_Player(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := NewClient(connector) - require.NoError(t, err) - + client := newClient(connector) actual, err := client.Players() require.NoError(t, err) @@ -183,9 +181,7 @@ func TestClient_Scores(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := NewClient(connector) - require.NoError(t, err) - + client := newClient(connector) actual, err := client.Scores(tt.player) require.NoError(t, err) diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index f9e49e6e62d4e..ccc020edb4fb6 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -32,11 +32,11 @@ const ( // Rcon package errors. var ( - ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.") - ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.") - ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.") - ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.") - ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.") + ErrInvalidWrite = errors.New("failed to write the payload correctly to remote connection") + ErrInvalidRead = errors.New("failed to read the response correctly from remote connection") + ErrInvalidChallenge = errors.New("server failed to mirror request challenge") + ErrUnauthorizedRequest = errors.New("client not authorized to remote server") + ErrFailedAuthorization = errors.New("failed to authorize to the remote server") ) type Client struct { @@ -62,7 +62,7 @@ type Packet struct { // Write method fails to write the header bytes in their little // endian byte order. 
func (p Packet) Compile() (payload []byte, err error) { - var size int32 = p.Header.Size + var size = p.Header.Size var buffer bytes.Buffer var padding [PacketPaddingSize]byte @@ -74,8 +74,12 @@ func (p Packet) Compile() (payload []byte, err error) { return } - buffer.WriteString(p.Body) - buffer.Write(padding[:]) + if _, err := buffer.WriteString(p.Body); err != nil { + return nil, err + } + if _, err := buffer.Write(padding[:]); err != nil { + return nil, err + } return buffer.Bytes(), nil } @@ -115,85 +119,90 @@ func (c *Client) Execute(command string) (response *Packet, err error) { // and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned // if send fails. -func (c *Client) Send(typ int32, command string) (response *Packet, err error) { +func (c *Client) Send(typ int32, command string) (*Packet, error) { if typ != Auth && !c.Authorized { - err = ErrUnauthorizedRequest - return + return nil, ErrUnauthorizedRequest } // Create a random challenge for the server to mirror in its response. var challenge int32 - binary.Read(rand.Reader, binary.LittleEndian, &challenge) + if err := binary.Read(rand.Reader, binary.LittleEndian, &challenge); nil != err { + return nil, err + } // Create the packet from the challenge, typ and command // and compile it to its byte payload packet := NewPacket(challenge, typ, command) payload, err := packet.Compile() + if nil != err { + return nil, err + } - var n int - + n, err := c.Connection.Write(payload) if nil != err { - return - } else if n, err = c.Connection.Write(payload); nil != err { - return - } else if n != len(payload) { - err = ErrInvalidWrite - return + return nil, err + } + if n != len(payload) { + return nil, ErrInvalidWrite } var header Header - - if err = binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { + return nil, err } if packet.Header.Type == Auth && header.Type == ResponseValue { // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. - c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))) + if _, err := c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))); nil != err { + return nil, err + } // Reread the packet header. 
- if err = binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { + return nil, err } } if header.Challenge != packet.Header.Challenge { - err = ErrInvalidChallenge - return + return nil, ErrInvalidChallenge } body := make([]byte, header.Size-int32(PacketHeaderSize)) n, err = c.Connection.Read(body) - for n < len(body) { var nBytes int nBytes, err = c.Connection.Read(body[n:]) if err != nil { - return + return nil, err } n += nBytes } + // Shouldn't this be moved up to the first read? if nil != err { - return - } else if n != len(body) { - err = ErrInvalidRead - return + return nil, err + } + if n != len(body) { + return nil, ErrInvalidRead } - response = new(Packet) + response := new(Packet) response.Header = header response.Body = strings.TrimRight(string(body), TerminationSequence) - return + return response, nil } // NewClient creates a new Client type, creating the connection diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 0de79d94a3c77..e953b3c2b5d7f 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -50,17 +50,8 @@ func (s *Minecraft) SampleConfig() string { func (s *Minecraft) Gather(acc telegraf.Accumulator) error { if s.client == nil { - connector, err := NewConnector(s.Server, s.Port, s.Password) - if err != nil { - return err - } - - client, err := NewClient(connector) - if err != nil { - return err - } - - s.client = client + connector := newConnector(s.Server, s.Port, s.Password) + s.client = newClient(connector) } players, err := s.client.Players() diff --git a/plugins/inputs/mock_Plugin.go b/plugins/inputs/mock_Plugin.go deleted file mode 100644 index 4dec121bc7b6f..0000000000000 --- a/plugins/inputs/mock_Plugin.go +++ /dev/null @@ -1,31 +0,0 @@ -package inputs - -import ( - "github.com/influxdata/telegraf" - - "github.com/stretchr/testify/mock" -) - -// MockPlugin struct should be named the same as the Plugin -type MockPlugin struct { - mock.Mock -} - -// Description will appear directly above the plugin definition in the config file -func (m *MockPlugin) Description() string { - return `This is an example plugin` -} - -// SampleConfig will populate the sample configuration portion of the plugin's configuration -func (m *MockPlugin) SampleConfig() string { - return ` sampleVar = 'foo'` -} - -// Gather defines what data the plugin will gather. -func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { - ret := m.Called(_a0) - - r0 := ret.Error(0) - - return r0 -} diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 3c568b5e6e5e7..9f4cf5e37487c 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -9,7 +9,7 @@ Registers via Modbus TCP or Modbus RTU/ASCII. 
[[inputs.modbus]] ## Connection Configuration ## - ## The plugin supports connections to PLCs via MODBUS/TCP or + ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or ## via serial line communication in binary (RTU) or readable (ASCII) encoding ## ## Device name @@ -29,15 +29,18 @@ Registers via Modbus TCP or Modbus RTU/ASCII. # TCP - connect via Modbus/TCP controller = "tcp://localhost:502" - + ## Serial (RS485; RS232) # controller = "file:///dev/ttyUSB0" # baud_rate = 9600 # data_bits = 8 # parity = "N" # stop_bits = 1 - # transmission_mode = "RTU" + ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" + ## default behaviour is "TCP" if the controller is TCP + ## For Serial you can choose between "RTU" and "ASCII" + # transmission_mode = "RTU" ## Measurements ## @@ -67,7 +70,7 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) ## FLOAT32 (deprecated), FIXED, UFIXED (fixed-point representation on input) ## scale - the final numeric variable representation ## address - variable address @@ -96,7 +99,7 @@ Metric are custom and configured using the `discrete_inputs`, `coils`, The field `data_type` defines the representation of the data value on input from the modbus registers. The input values are then converted from the given `data_type` to a type that is appropriate when -sending the value to the output plugin. These output types are usually one of string, +sending the value to the output plugin. These output types are usually one of string, integer or floating-point number. The size of the output type is assumed to be large enough for all supported input types. The mapping from the input type to the output type is fixed and cannot be configured. @@ -105,16 +108,16 @@ and cannot be configured. These types are used for integer input values. Select the one that matches your modbus data source. -#### Floating Point: `FLOAT32-IEEE` +#### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` -Use this type if your modbus registers contain a value that is encoded in this format. This type -always includes the sign and therefore there exists no variant. +Use these types if your modbus registers contain a value that is encoded in this format. These types +always include the sign, so no unsigned variant exists. #### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) These types are handled as an integer type on input, but are converted to floating point representation for further processing (e.g. scaling). Use one of these types when the input value is a decimal fixed point -representation of a non-integer value. +representation of a non-integer value. Select the type `UFIXED` when the input type is declared to hold unsigned integer values, which cannot be negative. The documentation of your modbus device should indicate this by a term like @@ -127,6 +130,20 @@ with N decimal places'. (FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion from unsigned values).
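To make the fixed-point behaviour concrete, here is a minimal standalone sketch (not the plugin's actual converter code) of decoding a two-register `UFIXED` value with `scale=0.1`, assuming `ABCD` byte order:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Two 16-bit registers read as four bytes in ABCD (big endian) order.
	raw := []byte{0x00, 0x00, 0x30, 0x39} // 12345

	// UFIXED: interpret the registers as an unsigned integer...
	value := binary.BigEndian.Uint32(raw)

	// ...then apply the configured scale to get the fixed-point result.
	scale := 0.1
	fmt.Println(float64(value) * scale) // 1234.5
}
```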
+### Troubleshooting +Modbus documentation is often a mess. People confuse memory addresses (which start at one) with register addresses (which start at zero), or are unclear about the word order used. Furthermore, there are some non-standard implementations that also +swap the bytes within the register word (16-bit). + +If you get an error or don't get the expected values from your device, you can try the following steps (assuming a 32-bit value). + +In case you are using a serial device and get a `permission denied` error, please check the permissions of your serial device and change them accordingly. + +In case you get an `exception '2' (illegal data address)` error, you might try to offset your `address` entries by minus one, as it is very likely that there is a confusion between memory and register addresses. + +In case you see strange values, the `byte_order` might be off. You can either probe all combinations (`ABCD`, `CDAB`, `BADC` or `DCBA`) or set `byte_order="ABCD" data_type="UINT32"` and use the resulting value(s) in an online converter like [this](https://www.scadacore.com/tools/programming-calculators/online-hex-converter/). This is especially useful if you don't want to mess with the device, have to deal with 64-bit values and/or don't know the `data_type` of your register (e.g. fixed-point values vs. IEEE floating point). + +If nothing helps, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as an issue). + ### Example Output ```sh diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go new file mode 100644 index 0000000000000..143f12867dea6 --- /dev/null +++ b/plugins/inputs/modbus/configuration.go @@ -0,0 +1,63 @@ +//go:build !openbsd + +package modbus + +import "fmt" + +const ( + maxQuantityDiscreteInput = uint16(2000) + maxQuantityCoils = uint16(2000) + maxQuantityInputRegisters = uint16(125) + maxQuantityHoldingRegisters = uint16(125) +) + +type Configuration interface { + Check() error + Process() (map[byte]requestSet, error) +} + +func removeDuplicates(elements []uint16) []uint16 { + encountered := map[uint16]bool{} + result := []uint16{} + + for _, addr := range elements { + if !encountered[addr] { + encountered[addr] = true + result = append(result, addr) + } + } + + return result +} + +func normalizeInputDatatype(dataType string) (string, error) { + switch dataType { + case "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", "FLOAT32", "FLOAT64": + return dataType, nil + } + return "unknown", fmt.Errorf("unknown type %q", dataType) +} + +func normalizeOutputDatatype(dataType string) (string, error) { + switch dataType { + case "", "native": + return "native", nil + case "INT64", "UINT64", "FLOAT64": + return dataType, nil + } + return "unknown", fmt.Errorf("unknown type %q", dataType) +} + +func normalizeByteOrder(byteOrder string) (string, error) { + switch byteOrder { + case "ABCD", "MSW-BE", "MSW": // Big endian (Motorola) + return "ABCD", nil + case "BADC", "MSW-LE": // Big endian with bytes swapped + return "BADC", nil + case "CDAB", "LSW-BE": // Little endian with bytes swapped + return "CDAB", nil + case "DCBA", "LSW-LE", "LSW": // Little endian (Intel) + return "DCBA", nil + } + return "unknown", fmt.Errorf("unknown byte-order %q", byteOrder) +}
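The shared `normalizeByteOrder` helper above collapses the various alias spellings onto four canonical orders. A small hypothetical snippet to illustrate; `demoByteOrders` is not part of this change:

```go
package modbus

import "fmt"

// demoByteOrders shows how the alias spellings collapse onto the
// four canonical byte orders accepted by the converters.
func demoByteOrders() {
	for _, alias := range []string{"ABCD", "MSW-BE", "MSW", "LSW-LE"} {
		order, err := normalizeByteOrder(alias)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("%s -> %s\n", alias, order) // e.g. MSW-BE -> ABCD
	}

	// Unknown spellings are rejected instead of silently decoding garbage.
	if _, err := normalizeByteOrder("ABDC"); err != nil {
		fmt.Println(err) // unknown byte-order "ABDC"
	}
}
```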
diff --git a/plugins/inputs/modbus/configuration_original.go b/plugins/inputs/modbus/configuration_original.go new file mode 100644 index 0000000000000..78861df74e0f7 --- /dev/null +++ b/plugins/inputs/modbus/configuration_original.go @@ -0,0 +1,248 @@ +//go:build !openbsd + +package modbus + +import ( + "fmt" +) + +type fieldDefinition struct { + Measurement string `toml:"measurement"` + Name string `toml:"name"` + ByteOrder string `toml:"byte_order"` + DataType string `toml:"data_type"` + Scale float64 `toml:"scale"` + Address []uint16 `toml:"address"` +} + +type ConfigurationOriginal struct { + SlaveID byte `toml:"slave_id"` + DiscreteInputs []fieldDefinition `toml:"discrete_inputs"` + Coils []fieldDefinition `toml:"coils"` + HoldingRegisters []fieldDefinition `toml:"holding_registers"` + InputRegisters []fieldDefinition `toml:"input_registers"` +} + +func (c *ConfigurationOriginal) Process() (map[byte]requestSet, error) { + coil, err := c.initRequests(c.Coils, cCoils, maxQuantityCoils) + if err != nil { + return nil, err + } + + discrete, err := c.initRequests(c.DiscreteInputs, cDiscreteInputs, maxQuantityDiscreteInput) + if err != nil { + return nil, err + } + + holding, err := c.initRequests(c.HoldingRegisters, cHoldingRegisters, maxQuantityHoldingRegisters) + if err != nil { + return nil, err + } + + input, err := c.initRequests(c.InputRegisters, cInputRegisters, maxQuantityInputRegisters) + if err != nil { + return nil, err + } + + return map[byte]requestSet{ + c.SlaveID: { + coil: coil, + discrete: discrete, + holding: holding, + input: input, + }, + }, nil +} + +func (c *ConfigurationOriginal) Check() error { + if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { + return err + } + + return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) +} + +func (c *ConfigurationOriginal) initRequests(fieldDefs []fieldDefinition, registerType string, maxQuantity uint16) ([]request, error) { + fields, err := c.initFields(fieldDefs) + if err != nil { + return nil, err + } + return newRequestsFromFields(fields, c.SlaveID, registerType, maxQuantity), nil +} + +func (c *ConfigurationOriginal) initFields(fieldDefs []fieldDefinition) ([]field, error) { + // Construct the fields from the field definitions + fields := make([]field, 0, len(fieldDefs)) + for _, def := range fieldDefs { + f, err := c.newFieldFromDefinition(def) + if err != nil { + return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) + } + fields = append(fields, f) + } + + return fields, nil +} + +func (c *ConfigurationOriginal) newFieldFromDefinition(def fieldDefinition) (field, error) { + // Check if the addresses are consecutive + expected := def.Address[0] + for _, current := range def.Address[1:] { + expected++ + if current != expected { + return field{}, fmt.Errorf("addresses of field %q are not consecutive", def.Name) + } + } + + // Initialize the field + f := field{ + measurement: def.Measurement, + name: def.Name, + scale: def.Scale, + address: def.Address[0], + length: uint16(len(def.Address)), + } + if def.DataType != "" { + inType, err := c.normalizeInputDatatype(def.DataType, len(def.Address)) + if err != nil { + return f, err + } + outType, err := c.normalizeOutputDatatype(def.DataType) + if err != nil { + return f, err + } + byteOrder, err := c.normalizeByteOrder(def.ByteOrder) + if err != nil { + return f, err + } + + f.converter, err = determineConverter(inType, byteOrder, outType, def.Scale) + if err != nil { + return f, err + } + } + + return f, nil +}
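`newFieldFromDefinition` above rejects gaps in the `address` list. Roughly, under the definitions in this file (and assuming `determineConverter`, which lives elsewhere in this change set, accepts the combination):

```go
package modbus

import "fmt"

// demoConsecutiveAddresses is a hypothetical sketch of the
// consecutive-address rule enforced by newFieldFromDefinition.
func demoConsecutiveAddresses() {
	var c ConfigurationOriginal

	// Addresses 10,11 are consecutive: accepted.
	valid := fieldDefinition{Name: "voltage", Address: []uint16{10, 11},
		ByteOrder: "ABCD", DataType: "UINT32", Scale: 1.0}
	if _, err := c.newFieldFromDefinition(valid); err != nil {
		fmt.Println(err)
	}

	// Addresses 10,12 leave a gap: rejected before any conversion is set up.
	gap := fieldDefinition{Name: "current", Address: []uint16{10, 12},
		ByteOrder: "ABCD", DataType: "UINT32", Scale: 1.0}
	if _, err := c.newFieldFromDefinition(gap); err != nil {
		fmt.Println(err) // addresses of field "current" are not consecutive
	}
}
```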
+func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error { + nameEncountered := map[string]bool{} + for _, item := range fieldDefs { + // check for an empty name + if item.Name == "" { + return fmt.Errorf("empty name in '%s'", registerType) + } + + // check for duplicate names + canonicalName := item.Measurement + "." + item.Name + if nameEncountered[canonicalName] { + return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, registerType, item.Name) + } + nameEncountered[canonicalName] = true + + if registerType == cInputRegisters || registerType == cHoldingRegisters { + // check the byte order + switch item.ByteOrder { + case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": + default: + return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, registerType, item.Name) + } + + // check the data type + switch item.DataType { + case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": + default: + return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, registerType, item.Name) + } + + // check scale + if item.Scale == 0.0 { + return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, registerType, item.Name) + } + } + + // check address + if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { + return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) + } + + if registerType == cInputRegisters || registerType == cHoldingRegisters { + if 2*len(item.Address) != len(item.ByteOrder) { + return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, registerType, item.Name) + } + + // check for duplicate addresses + if len(item.Address) > len(removeDuplicates(item.Address)) { + return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, registerType, item.Name) + } + } else if len(item.Address) != 1 { + return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) + } + } + return nil +} + +func (c *ConfigurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) { + // Handle our special types + switch dataType { + case "FIXED": + switch words { + case 1: + return "INT16", nil + case 2: + return "INT32", nil + case 4: + return "INT64", nil + default: + return "unknown", fmt.Errorf("invalid length %d for type %q", words, dataType) + } + case "FLOAT32", "UFIXED": + switch words { + case 1: + return "UINT16", nil + case 2: + return "UINT32", nil + case 4: + return "UINT64", nil + default: + return "unknown", fmt.Errorf("invalid length %d for type %q", words, dataType) + } + case "FLOAT32-IEEE": + return "FLOAT32", nil + case "FLOAT64-IEEE": + return "FLOAT64", nil + } + return normalizeInputDatatype(dataType) +} + +func (c *ConfigurationOriginal) normalizeOutputDatatype(dataType string) (string, error) { + // Handle our special types + switch dataType { + case "FIXED", "FLOAT32", "UFIXED": + return "FLOAT64", nil + } + return normalizeOutputDatatype("native") +} + +func (c *ConfigurationOriginal) normalizeByteOrder(byteOrder string) (string, error) { + // Handle our special types + switch byteOrder { + case "AB", "ABCDEFGH": + return "ABCD", nil + case "BADCFEHG": + return "BADC", nil + case "GHEFCDAB": + return "CDAB", nil + case "BA", "HGFEDCBA": + return "DCBA", nil + } + return normalizeByteOrder(byteOrder) +}
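These per-type normalizers funnel the legacy `FIXED`/`UFIXED`/`FLOAT32` spellings into the shared helpers. The plugin's `Init` (in the next file) then drives the two-step `Check`/`Process` contract; a rough sketch of that contract, using a hypothetical `buildRequests` driver:

```go
package modbus

import "fmt"

// buildRequests is a hypothetical driver showing the Configuration
// contract used by Init: Check validates the TOML-level definitions,
// Process compiles them into per-slave request sets.
func buildRequests(cfg Configuration) (map[byte]requestSet, error) {
	if err := cfg.Check(); err != nil {
		return nil, fmt.Errorf("configuration invalid: %v", err)
	}
	requests, err := cfg.Process()
	if err != nil {
		return nil, fmt.Errorf("cannot process configuration: %v", err)
	}
	return requests, nil
}
```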
diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index ec68890c5eb91..4769d6bd0342b 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,68 +1,64 @@ +//go:build !openbsd + package modbus import ( - "encoding/binary" "fmt" - "log" - "math" "net" "net/url" - "sort" + "strconv" "time" - mb "github.com/goburrow/modbus" + mb "github.com/grid-x/modbus" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" ) // Modbus holds all data relevant to the plugin type Modbus struct { - Name string `toml:"name"` - Controller string `toml:"controller"` - TransmissionMode string `toml:"transmission_mode"` - BaudRate int `toml:"baud_rate"` - DataBits int `toml:"data_bits"` - Parity string `toml:"parity"` - StopBits int `toml:"stop_bits"` - SlaveID int `toml:"slave_id"` - Timeout internal.Duration `toml:"timeout"` - Retries int `toml:"busy_retries"` - RetriesWaitTime internal.Duration `toml:"busy_retries_wait"` - DiscreteInputs []fieldContainer `toml:"discrete_inputs"` - Coils []fieldContainer `toml:"coils"` - HoldingRegisters []fieldContainer `toml:"holding_registers"` - InputRegisters []fieldContainer `toml:"input_registers"` - registers []register - isConnected bool - tcpHandler *mb.TCPClientHandler - rtuHandler *mb.RTUClientHandler - asciiHandler *mb.ASCIIClientHandler - client mb.Client -} - -type register struct { - Type string - RegistersRange []registerRange - Fields []fieldContainer -} - -type fieldContainer struct { - Measurement string `toml:"measurement"` - Name string `toml:"name"` - ByteOrder string `toml:"byte_order"` - DataType string `toml:"data_type"` - Scale float64 `toml:"scale"` - Address []uint16 `toml:"address"` + Name string `toml:"name"` + Controller string `toml:"controller"` + TransmissionMode string `toml:"transmission_mode"` + BaudRate int `toml:"baud_rate"` + DataBits int `toml:"data_bits"` + Parity string `toml:"parity"` + StopBits int `toml:"stop_bits"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"busy_retries"` + RetriesWaitTime config.Duration `toml:"busy_retries_wait"` + Log telegraf.Logger `toml:"-"` + // Register configuration + ConfigurationOriginal + // Connection handling + client mb.Client + handler mb.ClientHandler + isConnected bool + // Request handling + requests map[byte]requestSet +} + +type fieldConverterFunc func(bytes []byte) interface{} + +type requestSet struct { + coil []request + discrete []request + holding []request + input []request +} + +type field struct { + measurement string + name string + scale float64 + address uint16 + length uint16 + converter fieldConverterFunc value interface{} } -type registerRange struct { - address uint16 - length uint16 -} - const ( cDiscreteInputs = "discrete_input" cCoils = "coil"
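Under this new model each `field` carries its own converter closure, and the gather functions further down simply slice the read response at `2 * (address - request start)`. A minimal hypothetical sketch (`demoConvert` is not part of the change; the `request` type and `newRequestsFromFields` live in `request.go` elsewhere in this PR):

```go
package modbus

import "encoding/binary"

// demoConvert shows how a field covering two registers (4 bytes) picks
// its slice out of a response and converts it independently of its
// neighbours in the same request.
func demoConvert() interface{} {
	f := field{
		name:    "energy",
		address: 100,
		length:  2, // registers, 16 bit each
		converter: func(b []byte) interface{} {
			return binary.BigEndian.Uint32(b) // ABCD byte order
		},
	}

	response := []byte{0x00, 0x01, 0x00, 0x00} // raw bytes for registers 100..101
	requestStart := uint16(100)

	offset := 2 * (f.address - requestStart) // registers are 16 bit = 2 bytes
	return f.converter(response[offset : offset+2*f.length]) // 65536
}
```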
@@ -74,7 +70,7 @@ const description = `Retrieve data from MODBUS slave devices` const sampleConfig = ` ## Connection Configuration ## - ## The plugin supports connections to PLCs via MODBUS/TCP or + ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or ## via serial line communication in binary (RTU) or readable (ASCII) encoding ## ## Device name @@ -94,16 +90,19 @@ const sampleConfig = ` # TCP - connect via Modbus/TCP controller = "tcp://localhost:502" - + ## Serial (RS485; RS232) # controller = "file:///dev/ttyUSB0" # baud_rate = 9600 # data_bits = 8 # parity = "N" # stop_bits = 1 - # transmission_mode = "RTU" - + ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" + ## default behaviour is "TCP" if the controller is TCP + ## For Serial you can choose between "RTU" and "ASCII" + # transmission_mode = "RTU" + ## Measurements ## @@ -132,7 +131,8 @@ const sampleConfig = ` ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, + ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) ## scale - the final numeric variable representation ## address - variable address @@ -172,76 +172,74 @@ func (m *Modbus) Init() error { return fmt.Errorf("retries cannot be negative") } - err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs) - if err != nil { - return err + // Check and process the configuration + if err := m.ConfigurationOriginal.Check(); err != nil { + return fmt.Errorf("original configuration invalid: %v", err) } - err = m.InitRegister(m.Coils, cCoils) + r, err := m.ConfigurationOriginal.Process() if err != nil { - return err + return fmt.Errorf("cannot process original configuration: %v", err) } + m.requests = r - err = m.InitRegister(m.HoldingRegisters, cHoldingRegisters) - if err != nil { - return err - } - - err = m.InitRegister(m.InputRegisters, cInputRegisters) - if err != nil { - return err + // Setup client + if err := m.initClient(); err != nil { + return fmt.Errorf("initializing client failed: %v", err) } return nil } -func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { - if len(fields) == 0 { - return nil - } - - err := validateFieldContainers(fields, name) - if err != nil { - return err +// Gather implements the telegraf plugin interface method for data accumulation +func (m *Modbus) Gather(acc telegraf.Accumulator) error { + if !m.isConnected { + if err := m.connect(); err != nil { + return err + } } - addrs := []uint16{} - for _, field := range fields { - for _, a := range field.Address { - addrs = append(addrs, a) + timestamp := time.Now() + for retry := 0; retry <= m.Retries; retry++ { + timestamp = time.Now() + if err := m.gatherFields(); err != nil { + if mberr, ok := err.(*mb.Error); ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { + m.Log.Infof("Device busy! 
Retrying %d more time(s)...", m.Retries-retry) + time.Sleep(time.Duration(m.RetriesWaitTime)) + continue + } + // Show the disconnect error this way to not shadow the initial error + if discerr := m.disconnect(); discerr != nil { + m.Log.Errorf("Disconnecting failed: %v", discerr) + } + return err } + // Reading was successful, leave the retry loop + break } - addrs = removeDuplicates(addrs) - sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + for slaveID, requests := range m.requests { + tags := map[string]string{ + "name": m.Name, + "type": cCoils, + "slave_id": strconv.Itoa(int(slaveID)), + } + m.collectFields(acc, timestamp, tags, requests.coil) - ii := 0 - var registersRange []registerRange + tags["type"] = cDiscreteInputs + m.collectFields(acc, timestamp, tags, requests.discrete) - // Get range of consecutive integers - // [1, 2, 3, 5, 6, 10, 11, 12, 14] - // (1, 3) , (5, 2) , (10, 3), (14 , 1) - for range addrs { - if ii < len(addrs) { - start := addrs[ii] - end := start + tags["type"] = cHoldingRegisters + m.collectFields(acc, timestamp, tags, requests.holding) - for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 { - end = addrs[ii+1] - ii++ - } - ii++ - registersRange = append(registersRange, registerRange{start, end - start + 1}) - } + tags["type"] = cInputRegisters + m.collectFields(acc, timestamp, tags, requests.input) } - m.registers = append(m.registers, register{name, registersRange, fields}) - return nil } -// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] -func connect(m *Modbus) error { +func (m *Modbus) initClient() error { u, err := url.Parse(m.Controller) if err != nil { return err @@ -249,481 +247,200 @@ func connect(m *Modbus) error { switch u.Scheme { case "tcp": - var host, port string - host, port, err = net.SplitHostPort(u.Host) + host, port, err := net.SplitHostPort(u.Host) if err != nil { return err } - m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port) - m.tcpHandler.Timeout = m.Timeout.Duration - m.tcpHandler.SlaveId = byte(m.SlaveID) - m.client = mb.NewClient(m.tcpHandler) - err := m.tcpHandler.Connect() - if err != nil { - return err + switch m.TransmissionMode { + case "RTUoverTCP": + handler := mb.NewRTUOverTCPClientHandler(host + ":" + port) + handler.Timeout = time.Duration(m.Timeout) + m.handler = handler + case "ASCIIoverTCP": + handler := mb.NewASCIIOverTCPClientHandler(host + ":" + port) + handler.Timeout = time.Duration(m.Timeout) + m.handler = handler + default: + handler := mb.NewTCPClientHandler(host + ":" + port) + handler.Timeout = time.Duration(m.Timeout) + m.handler = handler } - m.isConnected = true - return nil case "file": - if m.TransmissionMode == "RTU" { - m.rtuHandler = mb.NewRTUClientHandler(u.Path) - m.rtuHandler.Timeout = m.Timeout.Duration - m.rtuHandler.SlaveId = byte(m.SlaveID) - m.rtuHandler.BaudRate = m.BaudRate - m.rtuHandler.DataBits = m.DataBits - m.rtuHandler.Parity = m.Parity - m.rtuHandler.StopBits = m.StopBits - m.client = mb.NewClient(m.rtuHandler) - err := m.rtuHandler.Connect() - if err != nil { - return err - } - m.isConnected = true - return nil - } else if m.TransmissionMode == "ASCII" { - m.asciiHandler = mb.NewASCIIClientHandler(u.Path) - m.asciiHandler.Timeout = m.Timeout.Duration - m.asciiHandler.SlaveId = byte(m.SlaveID) - m.asciiHandler.BaudRate = m.BaudRate - m.asciiHandler.DataBits = m.DataBits - m.asciiHandler.Parity = m.Parity - m.asciiHandler.StopBits = m.StopBits - m.client = mb.NewClient(m.asciiHandler) - err := m.asciiHandler.Connect() - if err != nil { - 
return err - } - m.isConnected = true - return nil - } else { + switch m.TransmissionMode { + case "RTU": + handler := mb.NewRTUClientHandler(u.Path) + handler.Timeout = time.Duration(m.Timeout) + handler.BaudRate = m.BaudRate + handler.DataBits = m.DataBits + handler.Parity = m.Parity + handler.StopBits = m.StopBits + m.handler = handler + case "ASCII": + handler := mb.NewASCIIClientHandler(u.Path) + handler.Timeout = time.Duration(m.Timeout) + handler.BaudRate = m.BaudRate + handler.DataBits = m.DataBits + handler.Parity = m.Parity + handler.StopBits = m.StopBits + m.handler = handler + default: return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) } default: - return fmt.Errorf("invalid controller") + return fmt.Errorf("invalid controller %q", m.Controller) } -} -func disconnect(m *Modbus) error { - u, err := url.Parse(m.Controller) - if err != nil { - return err - } + m.handler.SetSlave(m.SlaveID) + m.client = mb.NewClient(m.handler) + m.isConnected = false - switch u.Scheme { - case "tcp": - m.tcpHandler.Close() - return nil - case "file": - if m.TransmissionMode == "RTU" { - m.rtuHandler.Close() - return nil - } else if m.TransmissionMode == "ASCII" { - m.asciiHandler.Close() - return nil - } else { - return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) - } - default: - return fmt.Errorf("invalid controller") - } + return nil } -func validateFieldContainers(t []fieldContainer, n string) error { - nameEncountered := map[string]bool{} - for _, item := range t { - //check empty name - if item.Name == "" { - return fmt.Errorf("empty name in '%s'", n) - } - - //search name duplicate - canonical_name := item.Measurement + "." + item.Name - if nameEncountered[canonical_name] { - return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) - } else { - nameEncountered[canonical_name] = true - } - - if n == cInputRegisters || n == cHoldingRegisters { - // search byte order - switch item.ByteOrder { - case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": - break - default: - return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name) - } +// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] +func (m *Modbus) connect() error { + err := m.handler.Connect() + m.isConnected = err == nil + return err +} - // search data type - switch item.DataType { - case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT32", "FIXED", "UFIXED": - break - default: - return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name) - } +func (m *Modbus) disconnect() error { + err := m.handler.Close() + m.isConnected = false + return err +} - // check scale - if item.Scale == 0.0 { - return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, n, item.Name) - } +func (m *Modbus) gatherFields() error { + for _, requests := range m.requests { + if err := m.gatherRequestsCoil(requests.coil); err != nil { + return err } - - // check address - if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { - return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) + if err := m.gatherRequestsDiscrete(requests.discrete); err != nil { + return err } - - if n == cInputRegisters || n == cHoldingRegisters { - if 2*len(item.Address) != len(item.ByteOrder) { - return fmt.Errorf("invalid byte order '%s' and 
address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name) - } - - // search duplicated - if len(item.Address) > len(removeDuplicates(item.Address)) { - return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name) - } - } else if len(item.Address) != 1 { - return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) + if err := m.gatherRequestsHolding(requests.holding); err != nil { + return err } - } - return nil -} - -func removeDuplicates(elements []uint16) []uint16 { - encountered := map[uint16]bool{} - result := []uint16{} - - for v := range elements { - if encountered[elements[v]] { - } else { - encountered[elements[v]] = true - result = append(result, elements[v]) + if err := m.gatherRequestsInput(requests.input); err != nil { + return err } } - return result -} - -func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { - if rt == cDiscreteInputs { - return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length)) - } else if rt == cCoils { - return m.client.ReadCoils(uint16(rr.address), uint16(rr.length)) - } else if rt == cInputRegisters { - return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length)) - } else if rt == cHoldingRegisters { - return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length)) - } else { - return []byte{}, fmt.Errorf("not Valid function") - } + return nil } -func (m *Modbus) getFields() error { - for _, register := range m.registers { - rawValues := make(map[uint16][]byte) - bitRawValues := make(map[uint16]uint16) - for _, rr := range register.RegistersRange { - address := rr.address - readValues, err := readRegisterValues(m, register.Type, rr) - if err != nil { - return err - } - - // Raw Values - if register.Type == cDiscreteInputs || register.Type == cCoils { - for _, readValue := range readValues { - for bitPosition := 0; bitPosition < 8; bitPosition++ { - bitRawValues[address] = getBitValue(readValue, bitPosition) - address = address + 1 - if address+1 > rr.length { - break - } - } - } - } - - // Raw Values - if register.Type == cInputRegisters || register.Type == cHoldingRegisters { - batchSize := 2 - for batchSize < len(readValues) { - rawValues[address] = readValues[0:batchSize:batchSize] - address = address + 1 - readValues = readValues[batchSize:] - } - - rawValues[address] = readValues[0:batchSize:batchSize] - } - } - - if register.Type == cDiscreteInputs || register.Type == cCoils { - for i := 0; i < len(register.Fields); i++ { - register.Fields[i].value = bitRawValues[register.Fields[i].Address[0]] - } +func (m *Modbus) gatherRequestsCoil(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read coil@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadCoils(request.address, request.length) + if err != nil { + return err } + m.Log.Debugf("got coil@%v[%v]: %v", request.address, request.length, bytes) - if register.Type == cInputRegisters || register.Type == cHoldingRegisters { - for i := 0; i < len(register.Fields); i++ { - var values_t []byte - - for j := 0; j < len(register.Fields[i].Address); j++ { - tempArray := rawValues[register.Fields[i].Address[j]] - for x := 0; x < len(tempArray); x++ { - values_t = append(values_t, tempArray[x]) - } - } - - register.Fields[i].value = convertDataType(register.Fields[i], values_t) - } + // Bit value handling + for i, field := range request.fields { + offset := field.address - request.address 
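+ // Coil and discrete responses pack eight input bits per byte (LSB first),
+ // so the field's offset within the request selects the byte and the bit.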
+ idx := offset / 8 + bit := offset % 8 + request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) + m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) } } - return nil } -func getBitValue(n byte, pos int) uint16 { - return uint16(n >> uint(pos) & 0x01) -} - -func convertDataType(t fieldContainer, bytes []byte) interface{} { - switch t.DataType { - case "UINT16": - e16 := convertEndianness16(t.ByteOrder, bytes) - return scaleUint16(t.Scale, e16) - case "INT16": - e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := int16(e16) - return scaleInt16(t.Scale, f16) - case "UINT32": - e32 := convertEndianness32(t.ByteOrder, bytes) - return scaleUint32(t.Scale, e32) - case "INT32": - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := int32(e32) - return scaleInt32(t.Scale, f32) - case "UINT64": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := format64(t.DataType, e64).(uint64) - return scaleUint64(t.Scale, f64) - case "INT64": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := format64(t.DataType, e64).(int64) - return scaleInt64(t.Scale, f64) - case "FLOAT32-IEEE": - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := math.Float32frombits(e32) - return scaleFloat32(t.Scale, f32) - case "FIXED": - if len(bytes) == 2 { - e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := int16(e16) - return scale16toFloat(t.Scale, f16) - } else if len(bytes) == 4 { - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := int32(e32) - return scale32toFloat(t.Scale, f32) - } else { - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := int64(e64) - return scale64toFloat(t.Scale, f64) - } - case "FLOAT32", "UFIXED": - if len(bytes) == 2 { - e16 := convertEndianness16(t.ByteOrder, bytes) - return scale16UtoFloat(t.Scale, e16) - } else if len(bytes) == 4 { - e32 := convertEndianness32(t.ByteOrder, bytes) - return scale32UtoFloat(t.Scale, e32) - } else { - e64 := convertEndianness64(t.ByteOrder, bytes) - return scale64UtoFloat(t.Scale, e64) +func (m *Modbus) gatherRequestsDiscrete(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read discrete@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadDiscreteInputs(request.address, request.length) + if err != nil { + return err } - default: - return 0 - } -} - -func convertEndianness16(o string, b []byte) uint16 { - switch o { - case "AB": - return binary.BigEndian.Uint16(b) - case "BA": - return binary.LittleEndian.Uint16(b) - default: - return 0 - } -} + m.Log.Debugf("got discrete@%v[%v]: %v", request.address, request.length, bytes) -func convertEndianness32(o string, b []byte) uint32 { - switch o { - case "ABCD": - return binary.BigEndian.Uint32(b) - case "DCBA": - return binary.LittleEndian.Uint32(b) - case "BADC": - return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) - case "CDAB": - return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) - default: - return 0 - } -} + // Bit value handling + for i, field := range request.fields { + offset := field.address - request.address + idx := offset / 8 + bit := offset % 8 -func convertEndianness64(o string, b []byte) uint64 { - switch o { - case "ABCDEFGH": - return binary.BigEndian.Uint64(b) - case "HGFEDCBA": - return binary.LittleEndian.Uint64(b) - case "BADCFEHG": - return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | 
uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) - case "GHEFCDAB": - return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) - default: - return 0 + request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) + m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) + } } + return nil } -func format16(f string, r uint16) interface{} { - switch f { - case "UINT16": - return r - case "INT16": - return int16(r) - default: - return r - } -} +func (m *Modbus) gatherRequestsHolding(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read holding@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadHoldingRegisters(request.address, request.length) + if err != nil { + return err + } + m.Log.Debugf("got holding@%v[%v]: %v", request.address, request.length, bytes) -func format32(f string, r uint32) interface{} { - switch f { - case "UINT32": - return r - case "INT32": - return int32(r) - case "FLOAT32-IEEE": - return math.Float32frombits(r) - default: - return r - } -} + // Non-bit value handling + for i, field := range request.fields { + // Determine the offset of the field values in the read array + offset := 2 * (field.address - request.address) // registers are 16bit = 2 byte + length := 2 * field.length // field length is in registers a 16bit -func format64(f string, r uint64) interface{} { - switch f { - case "UINT64": - return r - case "INT64": - return int64(r) - default: - return r + // Convert the actual value + request.fields[i].value = field.converter(bytes[offset : offset+length]) + m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) + } } + return nil } -func scale16toFloat(s float64, v int16) float64 { - return float64(v) * s -} - -func scale32toFloat(s float64, v int32) float64 { - return float64(float64(v) * float64(s)) -} - -func scale64toFloat(s float64, v int64) float64 { - return float64(float64(v) * float64(s)) -} - -func scale16UtoFloat(s float64, v uint16) float64 { - return float64(v) * s -} - -func scale32UtoFloat(s float64, v uint32) float64 { - return float64(float64(v) * float64(s)) -} - -func scale64UtoFloat(s float64, v uint64) float64 { - return float64(float64(v) * float64(s)) -} - -func scaleInt16(s float64, v int16) int16 { - return int16(float64(v) * s) -} - -func scaleUint16(s float64, v uint16) uint16 { - return uint16(float64(v) * s) -} - -func scaleUint32(s float64, v uint32) uint32 { - return uint32(float64(v) * float64(s)) -} - -func scaleInt32(s float64, v int32) int32 { - return int32(float64(v) * float64(s)) -} - -func scaleFloat32(s float64, v float32) float32 { - return float32(float64(v) * s) -} - -func scaleUint64(s float64, v uint64) uint64 { - return uint64(float64(v) * float64(s)) -} - -func scaleInt64(s float64, v int64) int64 { - return int64(float64(v) * float64(s)) -} - -// Gather implements the telegraf plugin interface method for data accumulation -func (m *Modbus) Gather(acc telegraf.Accumulator) error { - if !m.isConnected { - err := connect(m) +func (m *Modbus) gatherRequestsInput(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read input@%v[%v]...", request.address, request.length) + bytes, err := 
m.client.ReadInputRegisters(request.address, request.length) if err != nil { - m.isConnected = false return err } - } + m.Log.Debugf("got input@%v[%v]: %v", request.address, request.length, bytes) - timestamp := time.Now() - for retry := 0; retry <= m.Retries; retry += 1 { - timestamp = time.Now() - err := m.getFields() - if err != nil { - mberr, ok := err.(*mb.ModbusError) - if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { - log.Printf("I! [inputs.modbus] device busy! Retrying %d more time(s)...", m.Retries-retry) - time.Sleep(m.RetriesWaitTime.Duration) - continue - } - disconnect(m) - m.isConnected = false - return err + // Non-bit value handling + for i, field := range request.fields { + // Determine the offset of the field values in the read array + offset := 2 * (field.address - request.address) // registers are 16bit = 2 byte + length := 2 * field.length // field length is in registers a 16bit + + // Convert the actual value + request.fields[i].value = field.converter(bytes[offset : offset+length]) + m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) } - // Reading was successful, leave the retry loop - break } + return nil +} +func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, tags map[string]string, requests []request) { grouper := metric.NewSeriesGrouper() - for _, reg := range m.registers { - tags := map[string]string{ - "name": m.Name, - "type": reg.Type, - } - - for _, field := range reg.Fields { + for _, request := range requests { + for _, field := range request.fields { // In case no measurement was specified we use "modbus" as default measurement := "modbus" - if field.Measurement != "" { - measurement = field.Measurement + if field.measurement != "" { + measurement = field.measurement } // Group the data by series - grouper.Add(measurement, tags, timestamp, field.Name, field.value) - } - - // Add the metrics grouped by series to the accumulator - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + if err := grouper.Add(measurement, tags, timestamp, field.name, field.value); err != nil { + acc.AddError(fmt.Errorf("cannot add field %q for measurement %q: %v", field.name, measurement, err)) + continue + } } } - return nil + // Add the metrics grouped by series to the accumulator + for _, x := range grouper.Metrics() { + acc.AddMetric(x) + } } // Add this plugin to telegraf diff --git a/plugins/inputs/modbus/modbus_openbsd.go b/plugins/inputs/modbus/modbus_openbsd.go new file mode 100644 index 0000000000000..6cc2bfeb3b8fd --- /dev/null +++ b/plugins/inputs/modbus/modbus_openbsd.go @@ -0,0 +1,3 @@ +//go:build openbsd + +package modbus diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 8c5241dc2aaee..4f9f4eca39434 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,12 +1,18 @@ +//go:build !openbsd + package modbus import ( + "fmt" + "strconv" "testing" + "time" - m "github.com/goburrow/modbus" - "github.com/stretchr/testify/assert" + mb "github.com/grid-x/modbus" + "github.com/stretchr/testify/require" "github.com/tbrandon/mbserver" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) @@ -77,43 +83,52 @@ func TestCoils(t *testing.T) { } serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - 
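collectFields above relies on telegraf's series grouper to merge fields that share a measurement, tag set, and timestamp into a single metric instead of emitting one metric per field. A minimal sketch of that usage, assuming only the grouper calls visible in the diff (NewSeriesGrouper, Add, Metrics); the tag and field values are made up:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// Merge fields sharing measurement, tags and timestamp into one
	// metric, the way collectFields does.
	grouper := metric.NewSeriesGrouper()
	tags := map[string]string{"name": "device-1", "type": "holding_register"}
	now := time.Now()

	fields := map[string]interface{}{"voltage": 230.1, "current": 1.7}
	for name, value := range fields {
		if err := grouper.Add("modbus", tags, now, name, value); err != nil {
			fmt.Printf("cannot add field %q: %v\n", name, err)
		}
	}

	// Both fields come back as a single series instead of two metrics.
	for _, m := range grouper.Metrics() {
		fmt.Println(m.Name(), m.Fields())
	}
}
```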
assert.NoError(t, err) - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) for _, ct := range coilTests { t.Run(ct.name, func(t *testing.T) { - _, err = client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) - assert.NoError(t, err) + _, err := client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) + require.NoError(t, err) modbus := Modbus{ Name: "TestCoils", Controller: "tcp://localhost:1502", - SlaveID: 1, - Coils: []fieldContainer{ - { - Name: ct.name, - Address: []uint16{ct.address}, - }, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: ct.name, + Address: []uint16{ct.address}, }, } - err = modbus.Init() - assert.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{ct.name: ct.read}, + time.Unix(0, 0), + ), + } + var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - assert.NotEmpty(t, modbus.registers) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, ct.read, coil.Fields[0].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -549,60 +564,404 @@ func TestHoldingRegisters(t *testing.T) { write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, read: uint64(18446742686322259968), }, + { + name: "register214_to_register217_abcdefgh_float64_ieee", + address: []uint16{214, 215, 216, 217}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0xBF, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(-0.02774907295123737), + }, + { + name: "register214_to_register217_abcdefgh_float64_ieee_scaled", + address: []uint16{214, 215, 216, 217}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 0.1, + write: []byte{0xBF, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(-0.002774907295123737), + }, + { + name: "register218_to_register221_abcdefgh_float64_ieee_pos", + address: []uint16{218, 219, 220, 221}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x3F, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(0.02774907295123737), + }, + { + name: "register222_to_register225_hgfecdba_float64_ieee", + address: []uint16{222, 223, 224, 225}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x55, 0x8F, 0x47, 0xC3, 0x40, 0x6A, 0x9C, 0xBF}, + read: float64(-0.02774907295123737), + }, + { + name: "register226_to_register229_badcfehg_float64_ieee", + address: []uint16{226, 227, 228, 229}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x9C, 0xBF, 0x40, 0x6A, 0x47, 0xC3, 0x55, 0x8F}, + read: float64(-0.02774907295123737), + }, + { + name: "register230_to_register233_ghefcdab_float64_ieee", + address: []uint16{230, 231, 232, 233}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x8F, 0x55, 0xC3, 0x47, 0x6A, 0x40, 0xBF, 
0x9C}, + read: float64(-0.02774907295123737), + }, } serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - assert.NoError(t, err) - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) for _, hrt := range holdingRegisterTests { t.Run(hrt.name, func(t *testing.T) { - _, err = client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) - assert.NoError(t, err) + _, err := client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) + require.NoError(t, err) modbus := Modbus{ Name: "TestHoldingRegisters", Controller: "tcp://localhost:1502", - SlaveID: 1, - HoldingRegisters: []fieldContainer{ - { - Name: hrt.name, - ByteOrder: hrt.byteOrder, - DataType: hrt.dataType, - Scale: hrt.scale, - Address: hrt.address, - }, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = []fieldDefinition{ + { + Name: hrt.name, + ByteOrder: hrt.byteOrder, + DataType: hrt.dataType, + Scale: hrt.scale, + Address: hrt.address, }, } - err = modbus.Init() - assert.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{hrt.name: hrt.read}, + time.Unix(0, 0), + ), + } + var acc testutil.Accumulator - modbus.Gather(&acc) - assert.NotEmpty(t, modbus.registers) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, hrt.read, coil.Fields[0].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } +func TestReadMultipleCoilWithHole(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + writeValue := uint16(0) + readValue := uint16(0) + for i := 0; i < 14; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + for i := 15; i < 18; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + for i := 24; i < 33; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + require.Len(t, 
expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestReadMultipleCoilWithHole", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{Name: "modbus:MultipleCoilWithHole"}, + } + modbus.SlaveID = 1 + modbus.Coils = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleCoilLimit(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + writeValue := uint16(0) + readValue := uint16(0) + for i := 0; i < 4000; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestReadCoils", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleHoldingRegisterWithHole(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + for i := 0; i < 10; i++ { + fc := fieldDefinition{ + Name: fmt.Sprintf("HoldingRegister-%v", i), + ByteOrder: "AB", + DataType: "INT16", + Scale: 1.0, + Address: []uint16{uint16(i)}, + } + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + for i := 20; i < 30; i++ { + fc := fieldDefinition{ + Name: fmt.Sprintf("HoldingRegister-%v", i), + ByteOrder: "AB", + DataType: "INT16", + Scale: 1.0, + Address: []uint16{uint16(i)}, + } + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = 
fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleHoldingRegisterLimit(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + for i := 0; i <= 400; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("HoldingRegister-%v", i) + fc.ByteOrder = "AB" + fc.DataType = "INT16" + fc.Scale = 1.0 + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + + modbus := Modbus{ + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + func TestRetrySuccessful(t *testing.T) { retries := 0 maxretries := 2 value := 1 serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() // Make read on coil-registers fail for some trials by making the device @@ -617,44 +976,52 @@ func TestRetrySuccessful(t *testing.T) { if retries >= maxretries { except = &mbserver.Success } - retries += 1 + retries++ return data, except }) - t.Run("retry_success", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetry", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_success", - Address: []uint16{0}, - }, + modbus := Modbus{ + Name: "TestRetry", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_success", + Address: []uint16{0}, + }, + } + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, }, - } + map[string]interface{}{"retry_success": uint16(value)}, + time.Unix(0, 0), + ), + } - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - assert.NotEmpty(t, modbus.registers) + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, 
modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, uint16(value), coil.Fields[0].value) - } - }) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } -func TestRetryFail(t *testing.T) { +func TestRetryFailExhausted(t *testing.T) { maxretries := 2 serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() // Make the read on coils fail with busy @@ -667,32 +1034,41 @@ func TestRetryFail(t *testing.T) { return data, &mbserver.SlaveDeviceBusy }) - t.Run("retry_fail", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetryFail", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_fail", - Address: []uint16{0}, - }, - }, - } + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + + err := modbus.Gather(&acc) + require.Error(t, err) + require.Equal(t, "modbus: exception '6' (server device busy), function '129'", err.Error()) +} + +func TestRetryFailIllegal(t *testing.T) { + maxretries := 2 - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.Error(t, err) - }) + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() // Make the read on coils fail with illegal function preventing retry counter := 0 serv.RegisterFunctionHandler(1, func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { - counter += 1 + counter++ data := make([]byte, 2) data[0] = byte(1) data[1] = byte(0) @@ -700,25 +1076,26 @@ func TestRetryFail(t *testing.T) { return data, &mbserver.IllegalFunction }) - t.Run("retry_fail", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetryFail", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_fail", - Address: []uint16{0}, - }, - }, - } + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.Error(t, err) - assert.Equal(t, counter, 1) - }) + err := modbus.Gather(&acc) + require.Error(t, err) + require.Equal(t, "modbus: exception '1' (illegal function), function '129'", err.Error()) + require.Equal(t, counter, 1) } diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go new file mode 100644 index 0000000000000..b2a31d9dcf4d3 --- /dev/null +++ b/plugins/inputs/modbus/request.go @@ -0,0 +1,60 @@ +//go:build !openbsd + +package modbus + +import "sort" + +type request struct { + address uint16 + length uint16 + fields []field +} + +func newRequestsFromFields(fields []field, slaveID byte, registerType string, maxBatchSize uint16) []request { + if len(fields) 
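The retry tests above pin down the intended policy: a "server device busy" exception is retried up to Retries additional times with a wait in between, while any other exception aborts immediately (hence the call-counter check in the illegal-function test). A library-agnostic sketch of that policy; errBusy here is a stand-in sentinel, whereas the plugin checks the concrete error type of its Modbus library:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errBusy stands in for the Modbus "server device busy" exception.
var errBusy = errors.New("server device busy")

// gatherWithRetry repeats a read while the device reports busy, sleeping
// between attempts, and gives up after `retries` extra tries. Any other
// error aborts immediately, which keeps the illegal-function case at
// exactly one call.
func gatherWithRetry(read func() error, retries int, wait time.Duration) error {
	for attempt := 0; ; attempt++ {
		err := read()
		if err == nil || !errors.Is(err, errBusy) || attempt >= retries {
			return err
		}
		fmt.Printf("device busy, retrying %d more time(s)...\n", retries-attempt)
		time.Sleep(wait)
	}
}

func main() {
	calls := 0
	err := gatherWithRetry(func() error {
		calls++
		if calls < 3 {
			return errBusy
		}
		return nil
	}, 2, 10*time.Millisecond)
	fmt.Printf("calls=%d err=%v\n", calls, err) // calls=3 err=<nil>
}
```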
== 0 {
+		return nil
+	}
+
+	// Sort the fields by address (ascending) and length
+	sort.Slice(fields, func(i, j int) bool {
+		addrI := fields[i].address
+		addrJ := fields[j].address
+		return addrI < addrJ || (addrI == addrJ && fields[i].length > fields[j].length)
+	})
+
+	// Construct the consecutive register chunks for the addresses and construct Modbus requests.
+	// For field addresses like [1, 2, 3, 5, 6, 10, 11, 12, 14] we should construct the following
+	// requests: (1, 3), (5, 2), (10, 3), (14, 1). Furthermore, we should respect field boundaries
+	// and the given maximum chunk sizes.
+	var requests []request
+
+	current := request{
+		address: fields[0].address,
+		length:  fields[0].length,
+		fields:  []field{fields[0]},
+	}
+
+	for _, f := range fields[1:] {
+		// Check if we need to interrupt the current chunk and require a new one
+		needInterrupt := f.address != current.address+current.length // not consecutive
+		needInterrupt = needInterrupt || f.length+current.length > maxBatchSize // too large
+
+		if !needInterrupt {
+			// Still safe to add the field to the current request
+			current.length += f.length
+			current.fields = append(current.fields, f) // TODO: omit the field with a future flag
+			continue
+		}
+
+		// Finish the current request, add it to the list and construct a new one
+		requests = append(requests, current)
+		current = request{
+			address: f.address,
+			length:  f.length,
+			fields:  []field{f},
+		}
+	}
+	requests = append(requests, current)
+
+	return requests
+}
diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go
new file mode 100644
index 0000000000000..88c4b7465a824
--- /dev/null
+++ b/plugins/inputs/modbus/type_conversions.go
@@ -0,0 +1,56 @@
+//go:build !openbsd
+
+package modbus
+
+import "fmt"
+
+func determineConverter(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) {
+	if scale != 0.0 {
+		return determineConverterScale(inType, byteOrder, outType, scale)
+	}
+	return determineConverterNoScale(inType, byteOrder, outType)
+}
+
+func determineConverterScale(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) {
+	switch inType {
+	case "INT16":
+		return determineConverterI16Scale(outType, byteOrder, scale)
+	case "UINT16":
+		return determineConverterU16Scale(outType, byteOrder, scale)
+	case "INT32":
+		return determineConverterI32Scale(outType, byteOrder, scale)
+	case "UINT32":
+		return determineConverterU32Scale(outType, byteOrder, scale)
+	case "INT64":
+		return determineConverterI64Scale(outType, byteOrder, scale)
+	case "UINT64":
+		return determineConverterU64Scale(outType, byteOrder, scale)
+	case "FLOAT32":
+		return determineConverterF32Scale(outType, byteOrder, scale)
+	case "FLOAT64":
+		return determineConverterF64Scale(outType, byteOrder, scale)
+	}
+	return nil, fmt.Errorf("invalid input data-type: %s", inType)
+}
+
+func determineConverterNoScale(inType, byteOrder, outType string) (fieldConverterFunc, error) {
+	switch inType {
+	case "INT16":
+		return determineConverterI16(outType, byteOrder)
+	case "UINT16":
+		return determineConverterU16(outType, byteOrder)
+	case "INT32":
+		return determineConverterI32(outType, byteOrder)
+	case "UINT32":
+		return determineConverterU32(outType, byteOrder)
+	case "INT64":
+		return determineConverterI64(outType, byteOrder)
+	case "UINT64":
+		return determineConverterU64(outType, byteOrder)
+	case "FLOAT32":
+		return determineConverterF32(outType, byteOrder)
+	case "FLOAT64":
+		return determineConverterF64(outType,
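newRequestsFromFields above packs sorted fields into as few consecutive reads as possible, splitting whenever a hole appears or the batch limit would be exceeded. A cut-down sketch of the same grouping for bare single-register addresses, reproducing the (1, 3), (5, 2), (10, 3), (14, 1) example from the doc comment; the limit of 125 registers is illustrative, not a value taken from the plugin:

```go
package main

import (
	"fmt"
	"sort"
)

type span struct{ address, length uint16 }

// batch groups sorted single-register addresses into consecutive read
// requests, splitting at every hole and at the batch limit — the same
// policy newRequestsFromFields applies to full field definitions.
func batch(addresses []uint16, maxBatch uint16) []span {
	if len(addresses) == 0 {
		return nil
	}
	sort.Slice(addresses, func(i, j int) bool { return addresses[i] < addresses[j] })

	var out []span
	cur := span{address: addresses[0], length: 1}
	for _, a := range addresses[1:] {
		if a != cur.address+cur.length || cur.length+1 > maxBatch {
			out = append(out, cur)
			cur = span{address: a, length: 1}
			continue
		}
		cur.length++
	}
	return append(out, cur)
}

func main() {
	// Prints [{1 3} {5 2} {10 3} {14 1}], matching the doc-comment example.
	fmt.Println(batch([]uint16{1, 2, 3, 5, 6, 10, 11, 12, 14}, 125))
}
```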
byteOrder) + } + return nil, fmt.Errorf("invalid input data-type: %s", inType) +} diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go new file mode 100644 index 0000000000000..088a5d10c445a --- /dev/null +++ b/plugins/inputs/modbus/type_conversions16.go @@ -0,0 +1,140 @@ +//go:build !openbsd + +package modbus + +import ( + "encoding/binary" + "fmt" +) + +type convert16 func([]byte) uint16 + +func endianessConverter16(byteOrder string) (convert16, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint16, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint16, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I16 - no scale +func determineConverterI16(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return int16(tohost(b)) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(int16(tohost(b))) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(int16(tohost(b))) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(int16(tohost(b))) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U16 - no scale +func determineConverterU16(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(tohost(b)) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I16 - scale +func determineConverterI16Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return int16(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U16 - scale +func determineConverterU16Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint16(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return 
float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go new file mode 100644 index 0000000000000..260a3dc065f70 --- /dev/null +++ b/plugins/inputs/modbus/type_conversions32.go @@ -0,0 +1,202 @@ +//go:build !openbsd + +package modbus + +import ( + "encoding/binary" + "fmt" + "math" +) + +type convert32 func([]byte) uint32 + +func binaryMSWLEU32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) +} + +func binaryLSWBEU32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) +} + +func endianessConverter32(byteOrder string) (convert32, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint32, nil + case "BADC": // Big endian with bytes swapped + return binaryMSWLEU32, nil + case "CDAB": // Little endian with bytes swapped + return binaryLSWBEU32, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint32, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I32 - no scale +func determineConverterI32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return int32(tohost(b)) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(int32(tohost(b))) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(int32(tohost(b))) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(int32(tohost(b))) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U32 - no scale +func determineConverterU32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(tohost(b)) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F32 - no scale +func determineConverterF32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + raw := tohost(b) + return math.Float32frombits(raw) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float64(in) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I32 - scale +func determineConverterI32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int32(tohost(b)) + 
return int32(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U32 - scale +func determineConverterU32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint32(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F32 - scale +func determineConverterF32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float32(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go new file mode 100644 index 0000000000000..55b0a0775c701 --- /dev/null +++ b/plugins/inputs/modbus/type_conversions64.go @@ -0,0 +1,184 @@ +//go:build !openbsd + +package modbus + +import ( + "encoding/binary" + "fmt" + "math" +) + +type convert64 func([]byte) uint64 + +func binaryMSWLEU64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) +} + +func binaryLSWBEU64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) +} + +func endianessConverter64(byteOrder string) (convert64, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint64, nil + case "BADC": // Big endian with bytes swapped + return binaryMSWLEU64, nil + case "CDAB": // Little endian with bytes swapped + return binaryLSWBEU64, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint64, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I64 - no scale +func determineConverterI64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", 
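The 32-bit converters above differ only in how the two 16-bit registers, and the bytes inside them, are ordered. A short sketch decoding the same four bytes under each of the supported byte orders:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// The same four register bytes under each supported 32-bit order.
	// ABCD is plain big endian; BADC and CDAB swap either the bytes
	// inside each 16-bit word or the words themselves.
	b := []byte{0x12, 0x34, 0x56, 0x78}

	abcd := binary.BigEndian.Uint32(b)
	dcba := binary.LittleEndian.Uint32(b)
	badc := uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:]))
	cdab := uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:]))

	fmt.Printf("ABCD: %#x\n", abcd) // 0x12345678
	fmt.Printf("DCBA: %#x\n", dcba) // 0x78563412
	fmt.Printf("BADC: %#x\n", badc) // 0x34127856
	fmt.Printf("CDAB: %#x\n", cdab) // 0x56781234
}
```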
"INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return uint64(in) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return float64(in) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U64 - no scale +func determineConverterU64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "native", "UINT64": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F64 - no scale +func determineConverterF64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + return math.Float64frombits(raw) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I64 - scale +func determineConverterI64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U64 - scale +func determineConverterU64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F64 - scale +func determineConverterF64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float64frombits(raw) + return in * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index cce93dc07376a..678d80c73184d 100644 --- a/plugins/inputs/mongodb/README.md +++ 
b/plugins/inputs/mongodb/README.md @@ -1,6 +1,8 @@ # MongoDB Input Plugin -### Configuration: +All MongoDB server versions from 2.6 and higher are supported. + +## Configuration ```toml [[inputs.mongodb]] @@ -9,7 +11,7 @@ ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status. ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -21,6 +23,10 @@ ## When true, collect per collection stats # gather_col_stats = false + + ## When true, collect usage statistics for each collection + ## (insert, update, queries, remove, getmore, commands etc...). + # gather_top_stat = false ## List of db where collections stats are collected ## If empty, all db are concerned @@ -34,20 +40,22 @@ # insecure_skip_verify = false ``` -#### Permissions: +### Permissions If your MongoDB instance has access control enabled you will need to connect as a user with sufficient rights. With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In version 3.2 you may also need these additional permissions: -``` + +```shell > db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}]) ``` If the user is missing required privileges you may see an error in the Telegraf logs similar to: -``` + +```shell Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 } ``` @@ -55,7 +63,7 @@ Some permission related errors are logged at debug level, you can check these messages by setting `debug = true` in the agent section of the configuration or by running Telegraf with the `--debug` argument. -### Metrics: +### Metrics - mongodb - tags: @@ -225,7 +233,7 @@ by running Telegraf with the `--debug` argument. - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) -+ mongodb_db_stats +- mongodb_db_stats - tags: - db_name - hostname @@ -263,13 +271,38 @@ by running Telegraf with the `--debug` argument. 
- available (integer) - created (integer) - refreshing (integer) + +- mongodb_top_stats + - tags: + - collection + - fields: + - total_time (integer) + - total_count (integer) + - read_lock_time (integer) + - read_lock_count (integer) + - write_lock_time (integer) + - write_lock_count (integer) + - queries_time (integer) + - queries_count (integer) + - get_more_time (integer) + - get_more_count (integer) + - insert_time (integer) + - insert_count (integer) + - update_time (integer) + - update_count (integer) + - remove_time (integer) + - remove_count (integer) + - commands_time (integer) + - commands_count (integer) -### Example Output: -``` +### Example Output + +```shell mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_c
ommand_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_tot
al_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000 +mongodb_top_stats,collection=foo,total_time=1471,total_count=158,read_lock_time=49614,read_lock_count=657,write_lock_time=49125456,write_lock_count=9841,queries_time=174,queries_count=495,get_more_time=498,get_more_count=46,insert_time=2651,insert_count=1265,update_time=0,update_count=0,remove_time=0,remove_count=0,commands_time=498611,commands_count=4615 ``` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 4ba54137383dd..3417252ddeb59 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -1,10 +1,10 @@ package mongodb import ( + "context" "crypto/tls" "crypto/x509" "fmt" - "net" "net/url" "strings" "sync" @@ -13,20 +13,24 @@ import ( "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/mgo.v2" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + 
"go.mongodb.org/mongo-driver/mongo/readpref" ) type MongoDB struct { Servers []string Ssl Ssl - mongos map[string]*Server GatherClusterStatus bool GatherPerdbStats bool GatherColStats bool + GatherTopStat bool ColStatsDbs []string tlsint.ClientConfig - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` + + clients []*Server } type Ssl struct { @@ -40,7 +44,7 @@ var sampleConfig = ` ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -53,6 +57,10 @@ var sampleConfig = ` ## When true, collect per collection stats # gather_col_stats = false + ## When true, collect usage statistics for each collection + ## (insert, update, queries, remove, getmore, commands etc...). + # gather_top_stat = false + ## List of db where collections stats are collected ## If empty, all db are concerned # col_stats_dbs = ["local"] @@ -73,126 +81,107 @@ func (*MongoDB) Description() string { return "Read metrics from one or many MongoDB servers" } -var localhost = &url.URL{Host: "mongodb://127.0.0.1:27017"} +func (m *MongoDB) Init() error { + var tlsConfig *tls.Config + if m.Ssl.Enabled { + // Deprecated TLS config + tlsConfig = &tls.Config{ + InsecureSkipVerify: m.ClientConfig.InsecureSkipVerify, + } + if len(m.Ssl.CaCerts) == 0 { + return fmt.Errorf("you must explicitly set insecure_skip_verify to skip cerificate validation") + } + + roots := x509.NewCertPool() + for _, caCert := range m.Ssl.CaCerts { + if ok := roots.AppendCertsFromPEM([]byte(caCert)); !ok { + return fmt.Errorf("failed to parse root certificate") + } + } + tlsConfig.RootCAs = roots + } else { + var err error + tlsConfig, err = m.ClientConfig.TLSConfig() + if err != nil { + return err + } + } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). -func (m *MongoDB) Gather(acc telegraf.Accumulator) error { if len(m.Servers) == 0 { - m.gatherServer(m.getMongoServer(localhost), acc) - return nil + m.Servers = []string{"mongodb://127.0.0.1:27017"} } - var wg sync.WaitGroup - for i, serv := range m.Servers { - if !strings.HasPrefix(serv, "mongodb://") { + for _, connURL := range m.Servers { + if !strings.HasPrefix(connURL, "mongodb://") && !strings.HasPrefix(connURL, "mongodb+srv://") { // Preserve backwards compatibility for hostnames without a // scheme, broken in go 1.8. 
Remove in Telegraf 2.0
-			serv = "mongodb://" + serv
-			m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv)
-			m.Servers[i] = serv
+			connURL = "mongodb://" + connURL
+			m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", connURL)
 		}
 
-		u, err := url.Parse(serv)
+		u, err := url.Parse(connURL)
 		if err != nil {
-			m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error())
-			continue
+			return fmt.Errorf("unable to parse connection URL: %q", err)
 		}
-		if u.Host == "" {
-			m.Log.Errorf("Unable to parse address %q", serv)
-			continue
-		}
-
-		wg.Add(1)
-		go func(srv *Server) {
-			defer wg.Done()
-			err := m.gatherServer(srv, acc)
-			if err != nil {
-				m.Log.Errorf("Error in plugin: %v", err)
-			}
-		}(m.getMongoServer(u))
-	}
-	wg.Wait()
-	return nil
-}
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel() //nolint:revive
-func (m *MongoDB) getMongoServer(url *url.URL) *Server {
-	if _, ok := m.mongos[url.Host]; !ok {
-		m.mongos[url.Host] = &Server{
-			Log: m.Log,
-			Url: url,
+		opts := options.Client().ApplyURI(connURL)
+		if tlsConfig != nil {
+			opts.TLSConfig = tlsConfig
 		}
-	}
-	return m.mongos[url.Host]
-}
-
-func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
-	if server.Session == nil {
-		var dialAddrs []string
-		if server.Url.User != nil {
-			dialAddrs = []string{server.Url.String()}
-		} else {
-			dialAddrs = []string{server.Url.Host}
+		if opts.ReadPreference == nil {
+			opts.ReadPreference = readpref.Nearest()
 		}
-		dialInfo, err := mgo.ParseURL(dialAddrs[0])
+
+		client, err := mongo.Connect(ctx, opts)
 		if err != nil {
-			return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error())
-		}
-		dialInfo.Direct = true
-		dialInfo.Timeout = 5 * time.Second
-
-		var tlsConfig *tls.Config
-
-		if m.Ssl.Enabled {
-			// Deprecated TLS config
-			tlsConfig = &tls.Config{}
-			if len(m.Ssl.CaCerts) > 0 {
-				roots := x509.NewCertPool()
-				for _, caCert := range m.Ssl.CaCerts {
-					ok := roots.AppendCertsFromPEM([]byte(caCert))
-					if !ok {
-						return fmt.Errorf("failed to parse root certificate")
-					}
-				}
-				tlsConfig.RootCAs = roots
-			} else {
-				tlsConfig.InsecureSkipVerify = true
-			}
-		} else {
-			tlsConfig, err = m.ClientConfig.TLSConfig()
-			if err != nil {
-				return err
-			}
+			return fmt.Errorf("unable to connect to MongoDB: %q", err)
 		}
-		// If configured to use TLS, add a dial function
-		if tlsConfig != nil {
-			dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
-				conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
-				if err != nil {
-					fmt.Printf("error in Dial, %s\n", err.Error())
-				}
-				return conn, err
-			}
+		err = client.Ping(ctx, opts.ReadPreference)
+		if err != nil {
+			return fmt.Errorf("unable to connect to MongoDB: %s", err)
 		}
-		sess, err := mgo.DialWithInfo(dialInfo)
-		if err != nil {
-			return fmt.Errorf("unable to connect to MongoDB: %s", err.Error())
+		server := &Server{
+			client:   client,
+			hostname: u.Host,
+			Log:      m.Log,
 		}
-		server.Session = sess
+		m.clients = append(m.clients, server)
 	}
-	return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs)
+
+	return nil
+}
+
+// Reads stats from all configured servers and accumulates stats.
+// Returns one of the errors encountered while gathering stats (if any).
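The new Init above replaces mgo dialing with the official driver: apply the connection URL, default the read preference, connect under a bounded context, and ping before keeping the client. A minimal standalone version of that handshake, assuming a reachable server at the example address:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func main() {
	// Fail fast: a bounded timeout for connect and ping.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	opts := options.Client().ApplyURI("mongodb://127.0.0.1:27017/?connect=direct")
	if opts.ReadPreference == nil {
		opts.ReadPreference = readpref.Nearest()
	}

	client, err := mongo.Connect(ctx, opts)
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer client.Disconnect(context.Background()) //nolint:errcheck

	// Ping verifies the server is actually reachable before the client
	// is kept around for gathering.
	if err := client.Ping(ctx, opts.ReadPreference); err != nil {
		fmt.Println("ping:", err)
		return
	}
	fmt.Println("connected")
}
```

The plugin's own Gather, shown next, fans the per-server collection out over a WaitGroup using the clients prepared here.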
+func (m *MongoDB) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, client := range m.clients { + wg.Add(1) + go func(srv *Server) { + defer wg.Done() + err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs) + if err != nil { + m.Log.Errorf("failed to gather data: %q", err) + } + }(client) + } + + wg.Wait() + return nil } func init() { inputs.Add("mongodb", func() telegraf.Input { return &MongoDB{ - mongos: make(map[string]*Server), GatherClusterStatus: true, GatherPerdbStats: false, GatherColStats: false, + GatherTopStat: false, ColStatsDbs: []string{"local"}, } }) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 6a2c0a86ebd12..e26c0e45231eb 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -15,6 +15,7 @@ type MongodbData struct { DbData []DbData ColData []ColData ShardHostData []DbData + TopStatsData []DbData } type DbData struct { @@ -37,7 +38,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { } } -var DefaultStats = map[string]string{ +var defaultStats = map[string]string{ "uptime_ns": "UptimeNanos", "inserts": "InsertCnt", "inserts_per_sec": "Insert", @@ -94,7 +95,7 @@ var DefaultStats = map[string]string{ "total_docs_scanned": "TotalObjectsScanned", } -var DefaultAssertsStats = map[string]string{ +var defaultAssertsStats = map[string]string{ "assert_regular": "Regular", "assert_warning": "Warning", "assert_msg": "Msg", @@ -102,7 +103,7 @@ var DefaultAssertsStats = map[string]string{ "assert_rollovers": "Rollovers", } -var DefaultCommandsStats = map[string]string{ +var defaultCommandsStats = map[string]string{ "aggregate_command_total": "AggregateCommandTotal", "aggregate_command_failed": "AggregateCommandFailed", "count_command_total": "CountCommandTotal", @@ -123,7 +124,7 @@ var DefaultCommandsStats = map[string]string{ "update_command_failed": "UpdateCommandFailed", } -var DefaultLatencyStats = map[string]string{ +var defaultLatencyStats = map[string]string{ "latency_writes_count": "WriteOpsCnt", "latency_writes": "WriteLatency", "latency_reads_count": "ReadOpsCnt", @@ -132,7 +133,7 @@ var DefaultLatencyStats = map[string]string{ "latency_commands": "CommandLatency", } -var DefaultReplStats = map[string]string{ +var defaultReplStats = map[string]string{ "repl_inserts": "InsertRCnt", "repl_inserts_per_sec": "InsertR", "repl_queries": "QueryRCnt", @@ -164,37 +165,37 @@ var DefaultReplStats = map[string]string{ "repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents", } -var DefaultClusterStats = map[string]string{ +var defaultClusterStats = map[string]string{ "jumbo_chunks": "JumboChunksCount", } -var DefaultShardStats = map[string]string{ +var defaultShardStats = map[string]string{ "total_in_use": "TotalInUse", "total_available": "TotalAvailable", "total_created": "TotalCreated", "total_refreshing": "TotalRefreshing", } -var ShardHostStats = map[string]string{ +var shardHostStats = map[string]string{ "in_use": "InUse", "available": "Available", "created": "Created", "refreshing": "Refreshing", } -var MmapStats = map[string]string{ +var mmapStats = map[string]string{ "mapped_megabytes": "Mapped", "non-mapped_megabytes": "NonMapped", "page_faults": "FaultsCnt", "page_faults_per_sec": "Faults", } -var WiredTigerStats = map[string]string{ +var wiredTigerStats = map[string]string{ "percent_cache_dirty": "CacheDirtyPercent", "percent_cache_used": 
"CacheUsedPercent", } -var WiredTigerExtStats = map[string]string{ +var wiredTigerExtStats = map[string]string{ "wtcache_tracked_dirty_bytes": "TrackedDirtyBytes", "wtcache_current_bytes": "CurrentCachedBytes", "wtcache_max_bytes_configured": "MaxBytesConfigured", @@ -215,7 +216,7 @@ var WiredTigerExtStats = map[string]string{ "wtcache_unmodified_pages_evicted": "UnmodifiedPagesEvicted", } -var DefaultTCMallocStats = map[string]string{ +var defaultTCMallocStats = map[string]string{ "tcmalloc_current_allocated_bytes": "TCMallocCurrentAllocatedBytes", "tcmalloc_heap_size": "TCMallocHeapSize", "tcmalloc_central_cache_free_bytes": "TCMallocCentralCacheFreeBytes", @@ -237,13 +238,13 @@ var DefaultTCMallocStats = map[string]string{ "tcmalloc_pageheap_total_reserve_bytes": "TCMallocPageheapTotalReserveBytes", } -var DefaultStorageStats = map[string]string{ +var defaultStorageStats = map[string]string{ "storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted", "storage_freelist_search_requests": "StorageFreelistSearchRequests", "storage_freelist_search_scanned": "StorageFreelistSearchScanned", } -var DbDataStats = map[string]string{ +var dbDataStats = map[string]string{ "collections": "Collections", "objects": "Objects", "avg_obj_size": "AvgObjSize", @@ -255,7 +256,7 @@ var DbDataStats = map[string]string{ "ok": "Ok", } -var ColDataStats = map[string]string{ +var colDataStats = map[string]string{ "count": "Count", "size": "Size", "avg_obj_size": "AvgObjSize", @@ -264,6 +265,27 @@ var ColDataStats = map[string]string{ "ok": "Ok", } +var topDataStats = map[string]string{ + "total_time": "TotalTime", + "total_count": "TotalCount", + "read_lock_time": "ReadLockTime", + "read_lock_count": "ReadLockCount", + "write_lock_time": "WriteLockTime", + "write_lock_count": "WriteLockCount", + "queries_time": "QueriesTime", + "queries_count": "QueriesCount", + "get_more_time": "GetMoreTime", + "get_more_count": "GetMoreCount", + "insert_time": "InsertTime", + "insert_count": "InsertCount", + "update_time": "UpdateTime", + "update_count": "UpdateCount", + "remove_time": "RemoveTime", + "remove_count": "RemoveCount", + "commands_time": "CommandsTime", + "commands_count": "CommandsCount", +} + func (d *MongodbData) AddDbStats() { for _, dbstat := range d.StatLine.DbStatsLines { dbStatLine := reflect.ValueOf(&dbstat).Elem() @@ -272,7 +294,7 @@ func (d *MongodbData) AddDbStats() { Fields: make(map[string]interface{}), } newDbData.Fields["type"] = "db_stat" - for key, value := range DbDataStats { + for key, value := range dbDataStats { val := dbStatLine.FieldByName(value).Interface() newDbData.Fields[key] = val } @@ -289,7 +311,7 @@ func (d *MongodbData) AddColStats() { Fields: make(map[string]interface{}), } newColData.Fields["type"] = "col_stat" - for key, value := range ColDataStats { + for key, value := range colDataStats { val := colStatLine.FieldByName(value).Interface() newColData.Fields[key] = val } @@ -305,7 +327,7 @@ func (d *MongodbData) AddShardHostStats() { Fields: make(map[string]interface{}), } newDbData.Fields["type"] = "shard_host_stat" - for k, v := range ShardHostStats { + for k, v := range shardHostStats { val := hostStatLine.FieldByName(v).Interface() newDbData.Fields[k] = val } @@ -313,16 +335,32 @@ func (d *MongodbData) AddShardHostStats() { } } +func (d *MongodbData) AddTopStats() { + for _, topStat := range d.StatLine.TopStatLines { + topStatLine := reflect.ValueOf(&topStat).Elem() + newTopStatData := &DbData{ + Name: topStat.CollectionName, + Fields: 
make(map[string]interface{}), + } + newTopStatData.Fields["type"] = "top_stat" + for key, value := range topDataStats { + val := topStatLine.FieldByName(value).Interface() + newTopStatData.Fields[key] = val + } + d.TopStatsData = append(d.TopStatsData, *newTopStatData) + } +} + func (d *MongodbData) AddDefaultStats() { statLine := reflect.ValueOf(d.StatLine).Elem() - d.addStat(statLine, DefaultStats) + d.addStat(statLine, defaultStats) if d.StatLine.NodeType != "" { - d.addStat(statLine, DefaultReplStats) + d.addStat(statLine, defaultReplStats) d.Tags["node_type"] = d.StatLine.NodeType } if d.StatLine.ReadLatency > 0 { - d.addStat(statLine, DefaultLatencyStats) + d.addStat(statLine, defaultLatencyStats) } if d.StatLine.ReplSetName != "" { @@ -337,23 +375,23 @@ func (d *MongodbData) AddDefaultStats() { d.add("version", d.StatLine.Version) } - d.addStat(statLine, DefaultAssertsStats) - d.addStat(statLine, DefaultClusterStats) - d.addStat(statLine, DefaultCommandsStats) - d.addStat(statLine, DefaultShardStats) - d.addStat(statLine, DefaultStorageStats) - d.addStat(statLine, DefaultTCMallocStats) + d.addStat(statLine, defaultAssertsStats) + d.addStat(statLine, defaultClusterStats) + d.addStat(statLine, defaultCommandsStats) + d.addStat(statLine, defaultShardStats) + d.addStat(statLine, defaultStorageStats) + d.addStat(statLine, defaultTCMallocStats) if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { - d.addStat(statLine, MmapStats) + d.addStat(statLine, mmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { - for key, value := range WiredTigerStats { + for key, value := range wiredTigerStats { val := statLine.FieldByName(value).Interface() percentVal := fmt.Sprintf("%.1f", val.(float64)*100) floatVal, _ := strconv.ParseFloat(percentVal, 64) d.add(key, floatVal) } - d.addStat(statLine, WiredTigerExtStats) + d.addStat(statLine, wiredTigerExtStats) d.add("page_faults", d.StatLine.FaultsCnt) } } @@ -409,4 +447,14 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) { ) host.Fields = make(map[string]interface{}) } + for _, col := range d.TopStatsData { + d.Tags["collection"] = col.Name + acc.AddFields( + "mongodb_top_stats", + col.Fields, + d.Tags, + d.StatLine.Time, + ) + col.Fields = make(map[string]interface{}) + } } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 4a1730211b594..f7f891ec775bf 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -5,8 +5,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -64,8 +65,8 @@ func TestAddNonReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + for key := range defaultStats { + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -85,8 +86,8 @@ func TestAddReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range MmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key), key) + for key := range mmapStats { + require.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -119,15 +120,15 @@ func TestAddWiredTigerStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range WiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", 
key), key) + for key := range wiredTigerStats { + require.True(t, acc.HasFloatField("mongodb", key), key) } - for key := range WiredTigerExtStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + for key := range wiredTigerExtStats { + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } - assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) + require.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { @@ -146,8 +147,8 @@ func TestAddShardStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultShardStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultShardStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -169,8 +170,8 @@ func TestAddLatencyStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultLatencyStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultLatencyStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -191,8 +192,8 @@ func TestAddAssertsStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultAssertsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultAssertsStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -226,8 +227,8 @@ func TestAddCommandsStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultCommandsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultCommandsStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -262,8 +263,8 @@ func TestAddTCMallocStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultTCMallocStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultTCMallocStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -282,8 +283,8 @@ func TestAddStorageStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultStorageStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultStorageStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -312,16 +313,16 @@ func TestAddShardHostStats(t *testing.T) { var hostsFound []string for host := range hostStatLines { - for key := range ShardHostStats { - assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) + for key := range shardHostStats { + require.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } - assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) + require.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) hostsFound = append(hostsFound, host) } sort.Strings(hostsFound) sort.Strings(expectedHosts) - assert.Equal(t, hostsFound, expectedHosts) + require.Equal(t, hostsFound, expectedHosts) } func TestStateTag(t *testing.T) { @@ -485,3 +486,49 @@ func TestStateTag(t *testing.T) { } acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } + +func TestAddTopStats(t *testing.T) { + collections := []string{"collectionOne", "collectionTwo"} + var topStatLines []TopStatLine + for _, collection := range collections { + topStatLine := TopStatLine{ + CollectionName: collection, + TotalTime: 0, + TotalCount: 0, + ReadLockTime: 0, + ReadLockCount: 0, + WriteLockTime: 0, + WriteLockCount: 0, + QueriesTime: 0, + QueriesCount: 0, + GetMoreTime: 0, + GetMoreCount: 0, + InsertTime: 0, + InsertCount: 0, + UpdateTime: 0, + UpdateCount: 0, + 
RemoveTime: 0, + RemoveCount: 0, + CommandsTime: 0, + CommandsCount: 0, + } + topStatLines = append(topStatLines, topStatLine) + } + + d := NewMongodbData( + &StatLine{ + TopStatLines: topStatLines, + }, + tags, + ) + + var acc testutil.Accumulator + d.AddTopStats() + d.flush(&acc) + + for range topStatLines { + for key := range topDataStats { + require.True(t, acc.HasInt64Field("mongodb_top_stats", key)) + } + } +} diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 5af48c10a6f9b..79d3d36c6c038 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -1,19 +1,23 @@ package mongodb import ( + "context" "fmt" - "net/url" + "strconv" "strings" "time" "github.com/influxdata/telegraf" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" ) type Server struct { - Url *url.URL - Session *mgo.Session + client *mongo.Client + hostname string lastResult *MongoStatus Log telegraf.Logger @@ -21,12 +25,12 @@ type Server struct { func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["hostname"] = s.Url.Host + tags["hostname"] = s.hostname return tags } type oplogEntry struct { - Timestamp bson.MongoTimestamp `bson:"ts"` + Timestamp primitive.Timestamp `bson:"ts"` } func IsAuthorization(err error) bool { @@ -41,15 +45,23 @@ func (s *Server) authLog(err error) { } } +func (s *Server) runCommand(database string, cmd interface{}, result interface{}) error { + r := s.client.Database(database).RunCommand(context.Background(), cmd) + if r.Err() != nil { + return r.Err() + } + return r.Decode(result) +} + func (s *Server) gatherServerStatus() (*ServerStatus, error) { serverStatus := &ServerStatus{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "serverStatus", + Key: "serverStatus", Value: 1, }, { - Name: "recordStats", + Key: "recordStats", Value: 0, }, }, serverStatus) @@ -61,9 +73,9 @@ func (s *Server) gatherServerStatus() (*ServerStatus, error) { func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { replSetStatus := &ReplSetStatus{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "replSetGetStatus", + Key: "replSetGetStatus", Value: 1, }, }, replSetStatus) @@ -73,22 +85,73 @@ func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { return replSetStatus, nil } +func (s *Server) gatherTopStatData() (*TopStats, error) { + dest := &bsonx.Doc{} + err := s.runCommand("admin", bson.D{ + { + Key: "top", + Value: 1, + }, + }, dest) + if err != nil { + return nil, err + } + + // From: https://github.com/mongodb/mongo-tools/blob/master/mongotop/mongotop.go#L49-L70 + // Remove 'note' field that prevents easy decoding, then round-trip + // again to simplify unpacking into the nested data structure + totals, err := dest.LookupErr("totals") + if err != nil { + return nil, err + } + recoded, err := totals.Document().Delete("note").MarshalBSON() + if err != nil { + return nil, err + } + topInfo := make(map[string]TopStatCollection) + if err := bson.Unmarshal(recoded, &topInfo); err != nil { + return nil, err + } + + return &TopStats{Totals: topInfo}, nil +} + func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { - chunkCount, err :=
s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() + chunkCount, err := s.client.Database("config").Collection("chunks").CountDocuments(context.Background(), bson.M{"jumbo": true}) if err != nil { return nil, err } return &ClusterStatus{ - JumboChunksCount: int64(chunkCount), + JumboChunksCount: chunkCount, }, nil } -func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { +func poolStatsCommand(version string) (string, error) { + majorPart := string(version[0]) + major, err := strconv.ParseInt(majorPart, 10, 64) + if err != nil { + return "", err + } + + if major == 5 { + return "connPoolStats", nil + } + return "shardConnPoolStats", nil +} + +func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) { + command, err := poolStatsCommand(version) + if err != nil { + return nil, err + } + shardStats := &ShardStats{} - err := s.Session.DB("admin").Run(bson.D{ + err = s.runCommand("admin", bson.D{ { - Name: "shardConnPoolStats", + Key: command, Value: 1, }, }, &shardStats) @@ -100,9 +161,9 @@ func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { func (s *Server) gatherDBStats(name string) (*Db, error) { stats := &DbStatsData{} - err := s.Session.DB(name).Run(bson.D{ + err := s.runCommand(name, bson.D{ { - Name: "dbStats", + Key: "dbStats", Value: 1, }, }, stats) @@ -120,19 +181,25 @@ func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) { query := bson.M{"ts": bson.M{"$exists": true}} var first oplogEntry - err := s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first) - if err != nil { + firstResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": 1})) + if firstResult.Err() != nil { + return nil, firstResult.Err() + } + if err := firstResult.Decode(&first); err != nil { return nil, err } var last oplogEntry - err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last) - if err != nil { + lastResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": -1})) + if lastResult.Err() != nil { + return nil, lastResult.Err() + } + if err := lastResult.Decode(&last); err != nil { return nil, err } - firstTime := time.Unix(int64(first.Timestamp>>32), 0) - lastTime := time.Unix(int64(last.Timestamp>>32), 0) + firstTime := time.Unix(int64(first.Timestamp.T), 0) + lastTime := time.Unix(int64(last.Timestamp.T), 0) stats := &OplogStats{ TimeDiff: int64(lastTime.Sub(firstTime).Seconds()), } @@ -154,7 +221,7 @@ func (s *Server) gatherOplogStats() (*OplogStats, error) { } func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) { - names, err := s.Session.DatabaseNames() + names, err := s.client.ListDatabaseNames(context.Background(), bson.D{}) if err != nil { return nil, err } @@ -163,16 +230,16 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) for _, dbName := range names { if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 { var colls []string - colls, err = s.Session.DB(dbName).CollectionNames() + colls, err = s.client.Database(dbName).ListCollectionNames(context.Background(), bson.D{}) if err != nil { s.Log.Errorf("Error getting collection names: %s", err.Error()) continue } for _, colName := range colls { colStatLine := &ColStatsData{} - err = s.Session.DB(dbName).Run(bson.D{ + err = s.runCommand(dbName, bson.D{ { - Name: 
"collStats", + Key: "collStats", Value: colName, }, }, colStatLine) @@ -192,10 +259,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) return results, nil } -func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { - s.Session.SetMode(mgo.Eventual, true) - s.Session.SetSocketTimeout(0) - +func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, gatherTopStat bool, colStatsDbs []string) error { serverStatus, err := s.gatherServerStatus() if err != nil { return err @@ -227,7 +291,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, clusterStatus = status } - shardStats, err := s.gatherShardConnPoolStats() + shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version) if err != nil { s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) } @@ -243,7 +307,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, dbStats := &DbStats{} if gatherDbStats { - names, err := s.Session.DatabaseNames() + names, err := s.client.ListDatabaseNames(context.Background(), bson.D{}) if err != nil { return err } @@ -257,6 +321,16 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, } } + topStatData := &TopStats{} + if gatherTopStat { + topStats, err := s.gatherTopStatData() + if err != nil { + s.Log.Debugf("Unable to gather top stat data: %s", err.Error()) + return err + } + topStatData = topStats + } + result := &MongoStatus{ ServerStatus: serverStatus, ReplSetStatus: replSetStatus, @@ -265,6 +339,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, ColStats: collectionStats, ShardStats: shardStats, OplogStats: oplogStats, + TopStats: topStatData, } result.SampleTime = time.Now() @@ -275,13 +350,14 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, durationInSeconds = 1 } data := NewMongodbData( - NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds), + NewStatLine(*s.lastResult, *result, s.hostname, true, durationInSeconds), s.getDefaultTags(), ) data.AddDefaultStats() data.AddDbStats() data.AddColStats() data.AddShardHostStats() + data.AddTopStats() data.flush(acc) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 91a3c0709f0d4..d2313e4088f82 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb @@ -5,9 +6,9 @@ package mongodb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGetDefaultTags(t *testing.T) { @@ -15,7 +16,7 @@ func TestGetDefaultTags(t *testing.T) { in string out string }{ - {"hostname", server.Url.Host}, + {"hostname", server.hostname}, } defaultTags := server.getDefaultTags() for _, tt := range tagTests { @@ -28,14 +29,56 @@ func TestGetDefaultTags(t *testing.T) { func TestAddDefaultStats(t *testing.T) { var acc testutil.Accumulator - err := server.gatherData(&acc, false) + err := server.gatherData(&acc, false, true, true, true, []string{"local"}) require.NoError(t, err) // need to call this twice so it can perform the diff - err = server.gatherData(&acc, 
false) + err = server.gatherData(&acc, false, true, true, true, []string{"local"}) require.NoError(t, err) - for key := range DefaultStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultStats { + require.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestPoolStatsVersionCompatibility(t *testing.T) { + tests := []struct { + name string + version string + expectedCommand string + err bool + }{ + { + name: "mongodb v3", + version: "3.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v4", + version: "4.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v5", + version: "5.0.0", + expectedCommand: "connPoolStats", + }, + { + name: "invalid version", + version: "v4", + err: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + command, err := poolStatsCommand(test.version) + require.Equal(t, test.expectedCommand, command) + if test.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) } } diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 73e68ed376784..24aa2fe3e0d04 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -1,62 +1,44 @@ +//go:build integration // +build integration package mongodb import ( + "context" "log" "math/rand" - "net/url" "os" "testing" "time" - "gopkg.in/mgo.v2" + "github.com/influxdata/telegraf/testutil" ) -var connect_url string var server *Server -func init() { - connect_url = os.Getenv("MONGODB_URL") - if connect_url == "" { - connect_url = "127.0.0.1:27017" - server = &Server{Url: &url.URL{Host: connect_url}} - } else { - full_url, err := url.Parse(connect_url) - if err != nil { - log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error()) - } - server = &Server{Url: full_url} +func testSetup(_ *testing.M) { + connectionString := os.Getenv("MONGODB_URL") + if connectionString == "" { + connectionString = "mongodb://127.0.0.1:27017" } -} -func testSetup(m *testing.M) { - var err error - var dialAddrs []string - if server.Url.User != nil { - dialAddrs = []string{server.Url.String()} - } else { - dialAddrs = []string{server.Url.Host} - } - dialInfo, err := mgo.ParseURL(dialAddrs[0]) - if err != nil { - log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) - } - dialInfo.Direct = true - dialInfo.Timeout = 5 * time.Second - sess, err := mgo.DialWithInfo(dialInfo) - if err != nil { - log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error()) + m := &MongoDB{ + Log: testutil.Logger{}, + Servers: []string{connectionString}, } - server.Session = sess - server.Session, _ = mgo.Dial(server.Url.Host) + err := m.Init() if err != nil { - log.Fatalln(err.Error()) + log.Fatalf("Failed to connect to MongoDB: %v", err) } + + server = m.clients[0] } -func testTeardown(m *testing.M) { - server.Session.Close() +func testTeardown(_ *testing.M) { + err := server.client.Disconnect(context.Background()) + if err != nil { + log.Fatalf("failed to disconnect: %v", err) + } } func TestMain(m *testing.M) { diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index ee96d5f8b3ad1..2490ca2c1777c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -37,9 +37,12 @@ type MongoStatus struct { ColStats *ColStats ShardStats *ShardStats OplogStats *OplogStats + TopStats *TopStats } type ServerStatus struct { + SampleTime time.Time `bson:""` + Flattened 
map[string]interface{} `bson:""` Host string `bson:"host"` Version string `bson:"version"` Process string `bson:"process"` @@ -63,7 +66,7 @@ type ServerStatus struct { Mem *MemStats `bson:"mem"` Repl *ReplStatus `bson:"repl"` ShardCursorType map[string]interface{} `bson:"shardCursorType"` - StorageEngine map[string]string `bson:"storageEngine"` + StorageEngine *StorageEngine `bson:"storageEngine"` WiredTiger *WiredTiger `bson:"wiredTiger"` Metrics *MetricsStats `bson:"metrics"` TCMallocStats *TCMallocStats `bson:"tcmalloc"` @@ -169,6 +172,27 @@ type ShardHostStatsData struct { Refreshing int64 `bson:"refreshing"` } +type TopStats struct { + Totals map[string]TopStatCollection `bson:"totals"` +} + +type TopStatCollection struct { + Total TopStatCollectionData `bson:"total"` + ReadLock TopStatCollectionData `bson:"readLock"` + WriteLock TopStatCollectionData `bson:"writeLock"` + Queries TopStatCollectionData `bson:"queries"` + GetMore TopStatCollectionData `bson:"getmore"` + Insert TopStatCollectionData `bson:"insert"` + Update TopStatCollectionData `bson:"update"` + Remove TopStatCollectionData `bson:"remove"` + Commands TopStatCollectionData `bson:"commands"` +} + +type TopStatCollectionData struct { + Time int64 `bson:"time"` + Count int64 `bson:"count"` +} + type ConcurrentTransactions struct { Write ConcurrentTransStats `bson:"write"` Read ConcurrentTransStats `bson:"read"` @@ -212,6 +236,10 @@ type CacheStats struct { UnmodifiedPagesEvicted int64 `bson:"unmodified pages evicted"` } +type StorageEngine struct { + Name string `bson:"name"` +} + // TransactionStats stores transaction checkpoints in WiredTiger. type TransactionStats struct { TransCheckpointsTotalTimeMsecs int64 `bson:"transaction checkpoint total time (msecs)"` @@ -220,14 +248,15 @@ type TransactionStats struct { // ReplStatus stores data related to replica sets. type ReplStatus struct { - SetName interface{} `bson:"setName"` - IsMaster interface{} `bson:"ismaster"` - Secondary interface{} `bson:"secondary"` - IsReplicaSet interface{} `bson:"isreplicaset"` - ArbiterOnly interface{} `bson:"arbiterOnly"` - Hosts []string `bson:"hosts"` - Passives []string `bson:"passives"` - Me string `bson:"me"` + SetName string `bson:"setName"` + IsWritablePrimary interface{} `bson:"isWritablePrimary"` // mongodb 5.x + IsMaster interface{} `bson:"ismaster"` + Secondary interface{} `bson:"secondary"` + IsReplicaSet interface{} `bson:"isreplicaset"` + ArbiterOnly interface{} `bson:"arbiterOnly"` + Hosts []string `bson:"hosts"` + Passives []string `bson:"passives"` + Me string `bson:"me"` } // DBRecordStats stores data related to memory operations across databases. 
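Editor's note, not part of the patch: the TopStat* structs added above decode the server's `top` reply purely through their bson tags. A minimal, runnable sketch of that round trip; the lowercased stand-in types and the "mydb.mycoll" namespace are illustrative, not from the diff:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Stand-ins mirroring TopStats / TopStatCollection / TopStatCollectionData.
type topStatCollectionData struct {
	Time  int64 `bson:"time"`
	Count int64 `bson:"count"`
}

type topStatCollection struct {
	Total topStatCollectionData `bson:"total"`
}

type topStats struct {
	Totals map[string]topStatCollection `bson:"totals"`
}

func main() {
	// Marshal a document shaped like a trimmed `top` reply (the real reply
	// also carries the "note" field that gatherTopStatData deletes first).
	raw, err := bson.Marshal(bson.M{
		"totals": bson.M{
			"mydb.mycoll": bson.M{"total": bson.M{"time": int64(12), "count": int64(3)}},
		},
	})
	if err != nil {
		panic(err)
	}

	var ts topStats
	if err := bson.Unmarshal(raw, &ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Totals["mydb.mycoll"].Total.Time) // prints 12
}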
@@ -768,6 +797,8 @@ type StatLine struct { // Shard Hosts stats field ShardHostStatsLines map[string]ShardHostStatLine + TopStatLines []TopStatLine + // TCMalloc stats field TCMallocCurrentAllocatedBytes int64 TCMallocHeapSize int64 @@ -825,6 +856,19 @@ type ShardHostStatLine struct { Refreshing int64 } +type TopStatLine struct { + CollectionName string + TotalTime, TotalCount int64 + ReadLockTime, ReadLockCount int64 + WriteLockTime, WriteLockCount int64 + QueriesTime, QueriesCount int64 + GetMoreTime, GetMoreCount int64 + InsertTime, InsertCount int64 + UpdateTime, UpdateCount int64 + RemoveTime, RemoveCount int64 + CommandsTime, CommandsCount int64 +} + func parseLocks(stat ServerStatus) map[string]LockUsage { returnVal := map[string]LockUsage{} for namespace, lockInfo := range stat.Locks { @@ -859,7 +903,7 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) (int64, int64) { +func diff(newVal, oldVal, sampleTime int64) (avg int64, newValue int64) { d := newVal - oldVal if d < 0 { d = newVal @@ -891,8 +935,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.TotalCreatedC = newStat.Connections.TotalCreated // set the storage engine appropriately - if newStat.StorageEngine != nil && newStat.StorageEngine["name"] != "" { - returnVal.StorageEngine = newStat.StorageEngine["name"] + if newStat.StorageEngine != nil && newStat.StorageEngine.Name != "" { + returnVal.StorageEngine = newStat.StorageEngine.Name } else { returnVal.StorageEngine = "mmapv1" } @@ -1043,8 +1087,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Repl.Network != nil { returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes - returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num - returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + if newStat.Metrics.Repl.Network.GetMores != nil { + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + } returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops } } @@ -1101,7 +1147,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.Time = newMongo.SampleTime returnVal.IsMongos = - (newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess)) + newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess) // BEGIN code modification if oldStat.Mem.Supported.(bool) { @@ -1118,21 +1164,19 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Repl != nil { - setName, isReplSet := newStat.Repl.SetName.(string) - if isReplSet { - returnVal.ReplSetName = setName - } + returnVal.ReplSetName = newStat.Repl.SetName // BEGIN code modification - if newStat.Repl.IsMaster.(bool) { + if val, ok := newStat.Repl.IsMaster.(bool); ok && val { + returnVal.NodeType = "PRI" + } else if val, ok := newStat.Repl.IsWritablePrimary.(bool); ok && val { returnVal.NodeType = "PRI" - } else if newStat.Repl.Secondary != nil && newStat.Repl.Secondary.(bool) { + } else if val, ok := newStat.Repl.Secondary.(bool); ok && val { returnVal.NodeType = "SEC" - } else if newStat.Repl.ArbiterOnly != nil && newStat.Repl.ArbiterOnly.(bool) { + } else if val, ok := newStat.Repl.ArbiterOnly.(bool); ok && val { 
returnVal.NodeType = "ARB" } else { returnVal.NodeType = "UNK" - } - // END code modification + } // END code modification } else if returnVal.IsMongos { returnVal.NodeType = "RTR" } @@ -1180,9 +1224,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // Get the entry with the highest lock highestLocked := lockdiffs[len(lockdiffs)-1] - var timeDiffMillis int64 - timeDiffMillis = newStat.UptimeMillis - oldStat.UptimeMillis - + timeDiffMillis := newStat.UptimeMillis - oldStat.UptimeMillis lockToReport := highestLocked.Writes // if the highest locked namespace is not '.' @@ -1210,7 +1252,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.GlobalLock != nil { - hasWT := (newStat.WiredTiger != nil && oldStat.WiredTiger != nil) + hasWT := newStat.WiredTiger != nil && oldStat.WiredTiger != nil //If we have wiredtiger stats, use those instead if newStat.GlobalLock.CurrentQueue != nil { if hasWT { @@ -1269,10 +1311,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // I'm the master returnVal.ReplLag = 0 break - } else { - // I'm secondary - me = member } + + // I'm secondary + me = member } else if member.State == 1 { // Master found master = member @@ -1365,5 +1407,32 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } } + if newMongo.TopStats != nil { + for collection, data := range newMongo.TopStats.Totals { + topStatDataLine := &TopStatLine{ + CollectionName: collection, + TotalTime: data.Total.Time, + TotalCount: data.Total.Count, + ReadLockTime: data.ReadLock.Time, + ReadLockCount: data.ReadLock.Count, + WriteLockTime: data.WriteLock.Time, + WriteLockCount: data.WriteLock.Count, + QueriesTime: data.Queries.Time, + QueriesCount: data.Queries.Count, + GetMoreTime: data.GetMore.Time, + GetMoreCount: data.GetMore.Count, + InsertTime: data.Insert.Time, + InsertCount: data.Insert.Count, + UpdateTime: data.Update.Time, + UpdateCount: data.Update.Count, + RemoveTime: data.Remove.Time, + RemoveCount: data.Remove.Count, + CommandsTime: data.Commands.Time, + CommandsCount: data.Commands.Count, + } + returnVal.TopStatLines = append(returnVal.TopStatLines, *topStatDataLine) + } + } + return returnVal } diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 5506602a9e692..908b82de1b911 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -2,14 +2,11 @@ package mongodb import ( "testing" - //"time" - //"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLatencyStats(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -56,16 +53,15 @@ func TestLatencyStats(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiffZero(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -126,16 +122,15 @@ func 
TestLatencyStatsDiffZero(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiff(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -196,10 +191,10 @@ func TestLatencyStatsDiff(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(59177981552)) - assert.Equal(t, sl.ReadLatency, int64(2255946760057)) - assert.Equal(t, sl.WriteLatency, int64(494479456987)) - assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) - assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) - assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) + require.Equal(t, sl.CommandLatency, int64(59177981552)) + require.Equal(t, sl.ReadLatency, int64(2255946760057)) + require.Equal(t, sl.WriteLatency, int64(494479456987)) + require.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + require.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + require.Equal(t, sl.WriteOpsCnt, int64(1691021287)) } diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index be116394d6609..aa4a08b31bbc8 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -128,6 +128,7 @@ Minimum Version of Monit tested with is 5.16. - hostname - port_number - request + - response_time - protocol - type @@ -232,4 +233,5 @@ monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=non monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 monit_system,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=debian-stretch-monit.virt,source=xyzzy.local,status=running,version=5.20.0 cpu_load_avg_15m=0,cpu_load_avg_1m=0,cpu_load_avg_5m=0,cpu_system=0,cpu_user=0,cpu_wait=0,mem_kb=42852i,mem_percent=2.1,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,status_code=0i,swap_kb=0,swap_percent=0 1579735047000000000 +monit_remote_host,dc=new-12,host=palladium,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,rack=rack-0,service=blog.kalvad.com,source=palladium,status=running,version=5.27.0 monitoring_status_code=1i,monitoring_mode_code=0i,response_time=0.664412,type="TCP",pending_action_code=0i,remote_hostname="blog.kalvad.com",port_number=443i,request="/",protocol="HTTP",status_code=0i 1599138990000000000 ``` diff --git a/plugins/inputs/monit/monit.go 
b/plugins/inputs/monit/monit.go index a17042bf5e3a9..051e0b36982fe 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -4,24 +4,26 @@ import ( "encoding/xml" "fmt" "net/http" + "time" + + "golang.org/x/net/html/charset" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "golang.org/x/net/html/charset" ) const ( - fileSystem string = "0" - directory = "1" - file = "2" - process = "3" - remoteHost = "4" - system = "5" - fifo = "6" - program = "7" - network = "8" + fileSystem = "0" + directory = "1" + file = "2" + process = "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" ) var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} @@ -114,11 +116,12 @@ type Upload struct { } type Port struct { - Hostname string `xml:"hostname"` - PortNumber int64 `xml:"portnumber"` - Request string `xml:"request"` - Protocol string `xml:"protocol"` - Type string `xml:"type"` + Hostname string `xml:"hostname"` + PortNumber int64 `xml:"portnumber"` + Request string `xml:"request"` + ResponseTime float64 `xml:"responsetime"` + Protocol string `xml:"protocol"` + Type string `xml:"type"` } type Block struct { @@ -177,7 +180,7 @@ type Monit struct { Password string `toml:"password"` client http.Client tls.ClientConfig - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` } type Messagebody struct { @@ -222,13 +225,12 @@ func (m *Monit) Init() error { TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, }, - Timeout: m.Timeout.Duration, + Timeout: time.Duration(m.Timeout), } return nil } func (m *Monit) Gather(acc telegraf.Accumulator) error { - req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil) if err != nil { return err @@ -243,111 +245,109 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - if resp.StatusCode == 200 { + if resp.StatusCode != 200 { + return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) + } - var status Status - decoder := xml.NewDecoder(resp.Body) - decoder.CharsetReader = charset.NewReaderLabel - if err := decoder.Decode(&status); err != nil { - return fmt.Errorf("error parsing input: %v", err) - } + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } - tags := map[string]string{ - "version": status.Server.Version, - "source": status.Server.LocalHostname, - "platform_name": status.Platform.Name, - } + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } - for _, service := range status.Services { - fields := make(map[string]interface{}) - tags["status"] = serviceStatus(service) - fields["status_code"] = service.Status - tags["pending_action"] = pendingAction(service) - fields["pending_action_code"] = service.PendingAction - tags["monitoring_status"] = monitoringStatus(service) - fields["monitoring_status_code"] = service.MonitoringStatus - tags["monitoring_mode"] = monitoringMode(service) - fields["monitoring_mode_code"] = service.MonitorMode - tags["service"] = service.Name - if service.Type 
== fileSystem { - fields["mode"] = service.Mode - fields["block_percent"] = service.Block.Percent - fields["block_usage"] = service.Block.Usage - fields["block_total"] = service.Block.Total - fields["inode_percent"] = service.Inode.Percent - fields["inode_usage"] = service.Inode.Usage - fields["inode_total"] = service.Inode.Total - acc.AddFields("monit_filesystem", fields, tags) - } else if service.Type == directory { - fields["mode"] = service.Mode - acc.AddFields("monit_directory", fields, tags) - } else if service.Type == file { - fields["size"] = service.Size - fields["mode"] = service.Mode - acc.AddFields("monit_file", fields, tags) - } else if service.Type == process { - fields["cpu_percent"] = service.CPU.Percent - fields["cpu_percent_total"] = service.CPU.PercentTotal - fields["mem_kb"] = service.Memory.Kilobyte - fields["mem_kb_total"] = service.Memory.KilobyteTotal - fields["mem_percent"] = service.Memory.Percent - fields["mem_percent_total"] = service.Memory.PercentTotal - fields["pid"] = service.Pid - fields["parent_pid"] = service.ParentPid - fields["threads"] = service.Threads - fields["children"] = service.Children - acc.AddFields("monit_process", fields, tags) - } else if service.Type == remoteHost { - fields["remote_hostname"] = service.Port.Hostname - fields["port_number"] = service.Port.PortNumber - fields["request"] = service.Port.Request - fields["protocol"] = service.Port.Protocol - fields["type"] = service.Port.Type - acc.AddFields("monit_remote_host", fields, tags) - } else if service.Type == system { - fields["cpu_system"] = service.System.CPU.System - fields["cpu_user"] = service.System.CPU.User - fields["cpu_wait"] = service.System.CPU.Wait - fields["cpu_load_avg_1m"] = service.System.Load.Avg01 - fields["cpu_load_avg_5m"] = service.System.Load.Avg05 - fields["cpu_load_avg_15m"] = service.System.Load.Avg15 - fields["mem_kb"] = service.System.Memory.Kilobyte - fields["mem_percent"] = service.System.Memory.Percent - fields["swap_kb"] = service.System.Swap.Kilobyte - fields["swap_percent"] = service.System.Swap.Percent - acc.AddFields("monit_system", fields, tags) - } else if service.Type == fifo { - fields["mode"] = service.Mode - acc.AddFields("monit_fifo", fields, tags) - } else if service.Type == program { - fields["program_started"] = service.Program.Started * 10000000 - fields["program_status"] = service.Program.Status - acc.AddFields("monit_program", fields, tags) - } else if service.Type == network { - fields["link_state"] = service.Link.State - fields["link_speed"] = service.Link.Speed - fields["link_mode"] = linkMode(service) - fields["download_packets_now"] = service.Link.Download.Packets.Now - fields["download_packets_total"] = service.Link.Download.Packets.Total - fields["download_bytes_now"] = service.Link.Download.Bytes.Now - fields["download_bytes_total"] = service.Link.Download.Bytes.Total - fields["download_errors_now"] = service.Link.Download.Errors.Now - fields["download_errors_total"] = service.Link.Download.Errors.Total - fields["upload_packets_now"] = service.Link.Upload.Packets.Now - fields["upload_packets_total"] = service.Link.Upload.Packets.Total - fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now - fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total - fields["upload_errors_now"] = service.Link.Upload.Errors.Now - fields["upload_errors_total"] = service.Link.Upload.Errors.Total - acc.AddFields("monit_network", fields, tags) - } + for _, service := range status.Services { + fields := make(map[string]interface{}) + 
tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = service.Mode + acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + fields["request"] = service.Port.Request + fields["response_time"] = service.Port.ResponseTime + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + 
fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) } - } else { - return fmt.Errorf("received status code %d (%s), expected 200", - resp.StatusCode, - http.StatusText(resp.StatusCode)) - } + return nil } @@ -364,9 +364,8 @@ func linkMode(s Service) string { func serviceStatus(s Service) string { if s.Status == 0 { return "running" - } else { - return "failure" } + return "failure" } func pendingAction(s Service) string { @@ -375,9 +374,8 @@ func pendingAction(s Service) string { return "unknown" } return pendingActions[s.PendingAction-1] - } else { - return "none" } + return "none" } func monitoringMode(s Service) string { diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 1d95b45a51bc5..ef47575e80b4c 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -4,19 +4,20 @@ import ( "errors" "net/http" "net/http/httptest" + "net/url" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type transportMock struct { } -func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { +func (t *transportMock) RoundTrip(_ *http.Request) (*http.Response, error) { errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + "read: connection reset by peer" @@ -179,6 +180,7 @@ func TestServiceType(t *testing.T) { "request": "", "protocol": "DEFAULT", "type": "TCP", + "response_time": 0.000145, }, time.Unix(0, 0), ), @@ -333,14 +335,12 @@ func TestServiceType(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, plugin.Gather(&acc)) - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -532,14 +532,12 @@ func TestMonitFailure(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, plugin.Gather(&acc)) - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -553,7 +551,6 @@ func checkAuth(r *http.Request, username, password string) bool { } func TestAllowHosts(t *testing.T) { - r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", @@ -565,46 +562,36 @@ func TestAllowHosts(t *testing.T) { r.client.Transport = &transportMock{} err := r.Gather(&acc) - - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "read: connection reset by peer") - } + require.Error(t, err) + require.Contains(t, err.Error(), "read: connection reset by peer") } func TestConnection(t *testing.T) { - r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", Password: 
"test", } - var acc testutil.Accumulator + require.NoError(t, r.Init()) - r.Init() + var acc testutil.Accumulator err := r.Gather(&acc) - - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "connect: connection refused") - } + require.Error(t, err) + _, ok := err.(*url.Error) + require.True(t, ok) } func TestInvalidUsernameOrPassword(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return } - switch r.URL.Path { - case "/_status": - http.ServeFile(w, r, "testdata/response_servicetype_0.xml") - default: - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) defer ts.Close() @@ -617,28 +604,21 @@ func TestInvalidUsernameOrPassword(t *testing.T) { var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) err := r.Gather(&acc) - - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestNoUsernameOrPasswordConfiguration(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return } - switch r.URL.Path { - case "/_status": - http.ServeFile(w, r, "testdata/response_servicetype_0.xml") - default: - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) defer ts.Close() @@ -649,15 +629,13 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) err := r.Gather(&acc) - - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestInvalidXMLAndInvalidTypes(t *testing.T) { - tests := []struct { name string filename string @@ -691,14 +669,13 @@ func TestInvalidXMLAndInvalidTypes(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "error parsing input:") - } + err := plugin.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing input:") }) } } diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index a9e8236ee0cf5..3fd128eb85e10 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -8,7 +8,7 @@ and creates metrics using one of the supported [input data formats][]. ```toml [[inputs.mqtt_consumer]] ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. 
## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 26122b8e86b88..3e88cecbbce45 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -5,10 +5,13 @@ import ( "errors" "fmt" "strings" + "sync" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -17,7 +20,7 @@ import ( var ( // 30 Seconds is the default used by paho.mqtt.golang - defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} + defaultConnectionTimeout = config.Duration(30 * time.Second) defaultMaxUndeliveredMessages = 1000 ) @@ -42,14 +45,14 @@ type Client interface { type ClientFactory func(o *mqtt.ClientOptions) Client type MQTTConsumer struct { - Servers []string `toml:"servers"` - Topics []string `toml:"topics"` - TopicTag *string `toml:"topic_tag"` - Username string `toml:"username"` - Password string `toml:"password"` - QoS int `toml:"qos"` - ConnectionTimeout internal.Duration `toml:"connection_timeout"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + Username string `toml:"username"` + Password string `toml:"password"` + QoS int `toml:"qos"` + ConnectionTimeout config.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` parser parsers.Parser @@ -62,14 +65,15 @@ type MQTTConsumer struct { Log telegraf.Logger - clientFactory ClientFactory - client Client - opts *mqtt.ClientOptions - acc telegraf.TrackingAccumulator - state ConnectionState - sem semaphore - messages map[telegraf.TrackingID]bool - topicTag string + clientFactory ClientFactory + client Client + opts *mqtt.ClientOptions + acc telegraf.TrackingAccumulator + state ConnectionState + sem semaphore + messages map[telegraf.TrackingID]bool + messagesMutex sync.Mutex + chosenTopicTag string ctx context.Context cancel context.CancelFunc @@ -77,7 +81,7 @@ type MQTTConsumer struct { var sampleConfig = ` ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. 
## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] @@ -167,13 +171,13 @@ func (m *MQTTConsumer) Init() error { return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } - if m.ConnectionTimeout.Duration < 1*time.Second { - return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) + if time.Duration(m.ConnectionTimeout) < 1*time.Second { + return fmt.Errorf("connection_timeout must be at least 1s: %s", time.Duration(m.ConnectionTimeout)) } - m.topicTag = "topic" + m.chosenTopicTag = "topic" if m.TopicTag != nil { - m.topicTag = *m.TopicTag + m.chosenTopicTag = *m.TopicTag } opts, err := m.createOpts() @@ -182,6 +186,7 @@ func (m *MQTTConsumer) Init() error { } m.opts = opts + m.messages = map[telegraf.TrackingID]bool{} return nil } @@ -204,9 +209,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { } m.state = Connecting - m.connect() - - return nil + return m.connect() } func (m *MQTTConsumer) connect() error { @@ -219,7 +222,6 @@ func (m *MQTTConsumer) connect() error { m.Log.Infof("Connected %v", m.Servers) m.state = Connected - m.messages = make(map[telegraf.TrackingID]bool) // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. @@ -246,18 +248,18 @@ func (m *MQTTConsumer) connect() error { return nil } -func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { +func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected - return } -func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { +func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { for { select { case track := <-m.acc.Delivered(): <-m.sem + m.messagesMutex.Lock() _, ok := m.messages[track.ID()] if !ok { // Added by a previous connection @@ -265,6 +267,7 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { } // No ack, MQTT does not support durable handling delete(m.messages, track.ID()) + m.messagesMutex.Unlock() case m.sem <- empty{}: err := m.onMessage(m.acc, msg) if err != nil { @@ -282,15 +285,17 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess return err } - if m.topicTag != "" { + if m.chosenTopicTag != "" { topic := msg.Topic() for _, metric := range metrics { - metric.AddTag(m.topicTag, topic) + metric.AddTag(m.chosenTopicTag, topic) } } id := acc.AddTrackingMetricGroup(metrics) + m.messagesMutex.Lock() m.messages[id] = true + m.messagesMutex.Unlock() return nil } @@ -304,11 +309,11 @@ func (m *MQTTConsumer) Stop() { m.cancel() } -func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { +func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting m.Log.Debugf("Connecting %v", m.Servers) - m.connect() + return m.connect() } return nil @@ -317,7 +322,7 @@ func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts := mqtt.NewClientOptions() - opts.ConnectTimeout = m.ConnectionTimeout.Duration + opts.ConnectTimeout = time.Duration(m.ConnectionTimeout) if m.ClientID == "" { opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index
4884fc0508107..a9b85c108ab65 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -49,20 +49,21 @@ type FakeParser struct { // FakeParser satisfies parsers.Parser var _ parsers.Parser = &FakeParser{} -func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *FakeParser) Parse(_ []byte) ([]telegraf.Metric, error) { panic("not implemented") } -func (p *FakeParser) ParseLine(line string) (telegraf.Metric, error) { +func (p *FakeParser) ParseLine(_ string) (telegraf.Metric, error) { panic("not implemented") } -func (p *FakeParser) SetDefaultTags(tags map[string]string) { +func (p *FakeParser) SetDefaultTags(_ map[string]string) { panic("not implemented") } type FakeToken struct { sessionPresent bool + complete chan struct{} } // FakeToken satisfies mqtt.Token @@ -84,6 +85,10 @@ func (t *FakeToken) SessionPresent() bool { return t.sessionPresent } +func (t *FakeToken) Done() <-chan struct{} { + return t.complete +} + // Test the basic lifecycle transitions of the plugin. func TestLifecycleSanity(t *testing.T) { var acc testutil.Accumulator diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go index 9c9813d9acf5c..65c2ac4e4b783 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -2,10 +2,9 @@ package multifile import ( "bytes" - "errors" "fmt" - "io/ioutil" "math" + "os" "path" "strconv" "time" @@ -85,7 +84,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { tags := make(map[string]string) for _, file := range m.Files { - fileContents, err := ioutil.ReadFile(file.Name) + fileContents, err := os.ReadFile(file.Name) if err != nil { if m.FailEarly { @@ -103,7 +102,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { var value interface{} - var d int = 0 + var d int if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" { var v float64 v, err = strconv.ParseFloat(vStr, 64) @@ -130,7 +129,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { } if value == nil { - return errors.New(fmt.Sprintf("invalid conversion %v", file.Conversion)) + return fmt.Errorf("invalid conversion %v", file.Conversion) } fields[file.Dest] = value diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go index b12f29f35c2cd..214cebd136f9c 100644 --- a/plugins/inputs/multifile/multifile_test.go +++ b/plugins/inputs/multifile/multifile_test.go @@ -5,9 +5,9 @@ import ( "path" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestFileTypes(t *testing.T) { @@ -32,8 +32,8 @@ func TestFileTypes(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + require.Equal(t, map[string]interface{}{ "examplebool": true, "examplestring": "hello world", "exampleint": int64(123456), @@ -60,7 +60,7 @@ func FailEarly(failEarly bool, t *testing.T) error { err := 
m.Gather(&acc) if err == nil { - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "exampleint": int64(123456), }, acc.Metrics[0].Fields) } } diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 644d4cf8d7887..0a96f9c9b1447 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -63,9 +63,15 @@ This plugin gathers the statistic data from MySQL server ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS # gather_innodb_metrics = false + ## gather metrics from all channels from SHOW SLAVE STATUS command output + # gather_all_slave_channels = false + ## gather metrics from SHOW SLAVE STATUS command output # gather_slave_status = false + ## use SHOW ALL SLAVES STATUS command output for MariaDB + # mariadb_dialect = false + ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false @@ -88,6 +94,15 @@ This plugin gathers the statistic data from MySQL server # gather_file_events_stats = false ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST + # gather_perf_events_statements = false + # + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME + # gather_perf_sum_per_acc_per_event = false + # + ## list of events to be gathered for gather_perf_sum_per_acc_per_event + ## if the list is empty, all events will be gathered + # perf_summary_events = [] + # + # gather_perf_events_statements = false ## the limits for metrics from perf_events_statements @@ -196,7 +211,9 @@ measurement name. * Slave status - metrics from `SHOW SLAVE STATUS` are gathered when single-source replication is on. This metric does not work with multi-source -replication. +replication, unless you set `gather_all_slave_channels = true`. For MariaDB, +set `mariadb_dialect = true` to account for differences in field names and +commands. * slave_[column name]() * Binary logs - all metrics including size and count of all binary files. Requires `gather_binary_logs` to be enabled in the configuration.
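For reference, a minimal sketch of how the new replication options map onto the `Mysql` plugin struct added in `mysql.go` below. It mirrors the construction style used by the tests in this change; the DSN is a placeholder and the snippet is illustrative rather than part of the patch:

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/plugins/inputs/mysql"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Placeholder DSN; the new TOML options map 1:1 onto these struct fields.
	m := &mysql.Mysql{
		Servers:                []string{"root@tcp(127.0.0.1:3306)/?tls=false"},
		GatherSlaveStatus:      true,
		GatherAllSlaveChannels: true, // emit one metric per replication channel, tagged "channel"
		MariadbDialect:         true, // issue SHOW ALL SLAVES STATUS instead of SHOW SLAVE STATUS
		MetricVersion:          2,
	}

	var acc testutil.Accumulator
	if err := m.Gather(&acc); err != nil {
		log.Fatal(err)
	}
}
```

In a deployed agent the same options would of course come from the TOML block above rather than from Go code.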
diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 7ce9bd1666173..3fbd4654ef2b4 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1,7 +1,6 @@ package mysql import ( - "bytes" "database/sql" "fmt" "strconv" @@ -10,11 +9,12 @@ import ( "time" "github.com/go-sql-driver/mysql" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" - "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" + v1 "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" + v2 "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" ) type Mysql struct { @@ -28,6 +28,8 @@ type Mysql struct { GatherInfoSchemaAutoInc bool `toml:"gather_info_schema_auto_inc"` GatherInnoDBMetrics bool `toml:"gather_innodb_metrics"` GatherSlaveStatus bool `toml:"gather_slave_status"` + GatherAllSlaveChannels bool `toml:"gather_all_slave_channels"` + MariadbDialect bool `toml:"mariadb_dialect"` GatherBinaryLogs bool `toml:"gather_binary_logs"` GatherTableIOWaits bool `toml:"gather_table_io_waits"` GatherTableLockWaits bool `toml:"gather_table_lock_waits"` @@ -37,6 +39,8 @@ type Mysql struct { GatherFileEventsStats bool `toml:"gather_file_events_stats"` GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` GatherGlobalVars bool `toml:"gather_global_variables"` + GatherPerfSummaryPerAccountPerEvent bool `toml:"gather_perf_sum_per_acc_per_event"` + PerfSummaryEvents []string `toml:"perf_summary_events"` IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` @@ -45,6 +49,7 @@ type Mysql struct { lastT time.Time initDone bool scanIntervalSlow uint32 + getStatusQuery string } const sampleConfig = ` @@ -92,6 +97,12 @@ const sampleConfig = ` ## gather metrics from SHOW SLAVE STATUS command output # gather_slave_status = false + ## gather metrics from all channels from SHOW SLAVE STATUS command output + # gather_all_slave_channels = false + + ## use SHOW ALL SLAVES STATUS command output for MariaDB + # mariadb_dialect = false + ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false @@ -121,6 +132,13 @@ const sampleConfig = ` # perf_events_statements_limit = 250 # perf_events_statements_time_limit = 86400 + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME + # gather_perf_sum_per_acc_per_event = false + + ## list of events to be gathered for gather_perf_sum_per_acc_per_event + ## if the list is empty, all events will be gathered + # perf_summary_events = [] + ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) ## example: interval_slow = "30m" # interval_slow = "" @@ -134,7 +152,6 @@ const sampleConfig = ` ` const ( - defaultTimeout = 5 * time.Second defaultPerfEventsStatementsDigestTextLimit = 120 defaultPerfEventsStatementsLimit = 250 defaultPerfEventsStatementsTimeLimit = 86400 @@ -158,6 +175,11 @@ func (m *Mysql) InitMysql() { m.scanIntervalSlow = uint32(interval.Seconds()) } } + if m.MariadbDialect { + m.getStatusQuery = slaveStatusQueryMariadb + } else { + m.getStatusQuery = slaveStatusQuery + } m.initDone = true } @@ -177,7 +199,9 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error { } if tlsConfig != nil { - mysql.RegisterTLSConfig("custom", tlsConfig) + if err := mysql.RegisterTLSConfig("custom", tlsConfig); err != nil { + return err + } } var wg sync.WaitGroup @@ -285,6 +309,7 @@ const (
globalStatusQuery = `SHOW GLOBAL STATUS` globalVariablesQuery = `SHOW GLOBAL VARIABLES` slaveStatusQuery = `SHOW SLAVE STATUS` + slaveStatusQueryMariadb = `SHOW ALL SLAVES STATUS` binaryLogsQuery = `SHOW BINARY LOGS` infoSchemaProcessListQuery = ` SELECT COALESCE(command,''),COALESCE(state,''),count(*) @@ -416,6 +441,38 @@ const ( FROM information_schema.tables WHERE table_schema = 'performance_schema' AND table_name = ? ` + + perfSummaryPerAccountPerEvent = ` + SELECT + coalesce(user, "unknown"), + coalesce(host, "unknown"), + coalesce(event_name, "unknown"), + count_star, + sum_timer_wait, + min_timer_wait, + avg_timer_wait, + max_timer_wait, + sum_lock_time, + sum_errors, + sum_warnings, + sum_rows_affected, + sum_rows_sent, + sum_rows_examined, + sum_created_tmp_disk_tables, + sum_created_tmp_tables, + sum_select_full_join, + sum_select_full_range_join, + sum_select_range, + sum_select_range_check, + sum_select_scan, + sum_sort_merge_passes, + sum_sort_range, + sum_sort_rows, + sum_sort_scan, + sum_no_index_used, + sum_no_good_index_used + FROM performance_schema.events_statements_summary_by_account_by_event_name + ` ) func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { @@ -491,6 +548,13 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { } } + if m.GatherPerfSummaryPerAccountPerEvent { + err = m.gatherPerfSummaryPerAccountPerEvent(db, serv, acc) + if err != nil { + return err + } + } + if m.GatherTableIOWaits { err = m.gatherPerfTableIOWaits(db, serv, acc) if err != nil { @@ -573,7 +637,12 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu value, err := m.parseGlobalVariables(key, val) if err != nil { - m.Log.Debugf("Error parsing global variable %q: %v", key, err) + errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } } else { fields[key] = value } @@ -593,14 +662,9 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { if m.MetricVersion < 2 { - v, ok := v1.ParseValue(value) - if ok { - return v, nil - } - return v, fmt.Errorf("could not parse value: %q", string(value)) - } else { - return v2.ConvertGlobalVariables(key, value) + return v1.ParseValue(value) } + return v2.ConvertGlobalVariables(key, value) } // gatherSlaveStatuses can be used to get replication analytics @@ -609,7 +673,10 @@ func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{ // This code does not work with multi-source replication. 
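+// That restriction is relaxed by this change: with gather_all_slave_channels +// enabled, every row returned by the status query is emitted and tagged with its +// channel name (Channel_Name on MySQL, Connection_Name on MariaDB, "default" when +// the name is empty); otherwise only the first row is read, as before.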
func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { // run query - rows, err := db.Query(slaveStatusQuery) + rows, err := db.Query(m.getStatusQuery) if err != nil { return err } @@ -620,32 +687,72 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu tags := map[string]string{"server": servtag} fields := make(map[string]interface{}) - // to save the column names as a field key - // scanning keys and values separately - if rows.Next() { + // for each channel record + for rows.Next() { + // to save the column names as a field key + // scanning keys and values separately + + // get columns names, and create an array with its length - cols, err := rows.Columns() + cols, err := rows.ColumnTypes() if err != nil { return err } - vals := make([]interface{}, len(cols)) + vals := make([]sql.RawBytes, len(cols)) + valPtrs := make([]interface{}, len(cols)) // fill the array with sql.RawBytes for i := range vals { - vals[i] = &sql.RawBytes{} + vals[i] = sql.RawBytes{} + valPtrs[i] = &vals[i] } - if err = rows.Scan(vals...); err != nil { + if err = rows.Scan(valPtrs...); err != nil { return err } + // range over columns, and try to parse values for i, col := range cols { + colName := col.Name() + if m.MetricVersion >= 2 { - col = strings.ToLower(col) + colName = strings.ToLower(colName) + } + + colValue := vals[i] + + if m.GatherAllSlaveChannels && + (strings.ToLower(colName) == "channel_name" || strings.ToLower(colName) == "connection_name") { + // Since the default channel name is empty, we need this block + channelName := "default" + if len(colValue) > 0 { + channelName = string(colValue) + } + tags["channel"] = channelName + continue } - if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { - fields["slave_"+col] = value + + if len(colValue) == 0 { + continue } + + value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName()) + if err != nil { + errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } + continue + } + + fields["slave_"+colName] = value } acc.AddFields("mysql", fields, tags) + + // Only the first row is relevant unless all slave channels are gathered, + // so break here and skip the remaining rows + if !m.GatherAllSlaveChannels { + break + } } return nil @@ -665,17 +772,31 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} var ( - size uint64 = 0 - count uint64 = 0 - fileSize uint64 - fileName string + size uint64 + count uint64 + fileSize uint64 + fileName string + encrypted string ) + columns, err := rows.Columns() + if err != nil { + return err + } + numColumns := len(columns) + // iterate over rows and accumulate the total size and count of files for rows.Next() { - if err := rows.Scan(&fileName, &fileSize); err != nil { - return err + if numColumns == 3 { + if err := rows.Scan(&fileName, &fileSize, &encrypted); err != nil { + return err + } + } else { + if err := rows.Scan(&fileName, &fileSize); err != nil { + return err + } } + size += fileSize count++ } @@ -683,6 +804,7 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat "binary_size_bytes": size, "binary_files_count": count, } + acc.AddFields("mysql", fields, tags) return nil } @@ -734,42 +856,42 @@ func (m
*Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum case "Queries": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["queries"] = i } case "Questions": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["questions"] = i } case "Slow_queries": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["slow_queries"] = i } case "Connections": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["connections"] = i } case "Syncs": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["syncs"] = i } case "Uptime": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["uptime"] = i } @@ -778,7 +900,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum key = strings.ToLower(key) value, err := v2.ConvertGlobalStatus(key, val) if err != nil { - m.Log.Debugf("Error parsing global status: %v", err) + acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err)) } else { fields[key] = value } @@ -807,6 +929,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return err } defer rows.Close() + var ( command string state string @@ -846,16 +969,17 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. 
} // get count of connections from each user - conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") + connRows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") if err != nil { return err } + defer connRows.Close() - for conn_rows.Next() { + for connRows.Next() { var user string var connections int64 - err = conn_rows.Scan(&user, &connections) + err = connRows.Scan(&user, &connections) if err != nil { return err } @@ -917,7 +1041,7 @@ func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegr case *string: fields[cols[i]] = *v default: - return fmt.Errorf("Unknown column type - %T", v) + return fmt.Errorf("unknown column type - %T", v) } } acc.AddFields("mysql_user_stats", fields, tags) @@ -942,146 +1066,146 @@ func columnsToLower(s []string, e error) ([]string, error) { func getColSlice(l int) ([]interface{}, error) { // list of all possible column names var ( - user string - total_connections int64 - concurrent_connections int64 - connected_time int64 - busy_time int64 - cpu_time int64 - bytes_received int64 - bytes_sent int64 - binlog_bytes_written int64 - rows_read int64 - rows_sent int64 - rows_deleted int64 - rows_inserted int64 - rows_updated int64 - select_commands int64 - update_commands int64 - other_commands int64 - commit_transactions int64 - rollback_transactions int64 - denied_connections int64 - lost_connections int64 - access_denied int64 - empty_queries int64 - total_ssl_connections int64 - max_statement_time_exceeded int64 + user string + totalConnections int64 + concurrentConnections int64 + connectedTime int64 + busyTime int64 + cpuTime int64 + bytesReceived int64 + bytesSent int64 + binlogBytesWritten int64 + rowsRead int64 + rowsSent int64 + rowsDeleted int64 + rowsInserted int64 + rowsUpdated int64 + selectCommands int64 + updateCommands int64 + otherCommands int64 + commitTransactions int64 + rollbackTransactions int64 + deniedConnections int64 + lostConnections int64 + accessDenied int64 + emptyQueries int64 + totalSslConnections int64 + maxStatementTimeExceeded int64 // maria specific - fbusy_time float64 - fcpu_time float64 + fbusyTime float64 + fcpuTime float64 // percona specific - rows_fetched int64 - table_rows_read int64 + rowsFetched int64 + tableRowsRead int64 ) switch l { case 23: // maria5 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &fbusy_time, - &fcpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_read, - &rows_sent, - &rows_deleted, - &rows_inserted, - &rows_updated, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, + &totalConnections, + &concurrentConnections, + &connectedTime, + &fbusyTime, + &fcpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsRead, + &rowsSent, + &rowsDeleted, + &rowsInserted, + &rowsUpdated, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, }, nil case 25: // maria10 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &fbusy_time, - &fcpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_read, - &rows_sent, - &rows_deleted, - &rows_inserted, - &rows_updated, - 
&select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, - &total_ssl_connections, - &max_statement_time_exceeded, + &totalConnections, + &concurrentConnections, + &connectedTime, + &fbusyTime, + &fcpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsRead, + &rowsSent, + &rowsDeleted, + &rowsInserted, + &rowsUpdated, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, + &totalSslConnections, + &maxStatementTimeExceeded, }, nil case 21: // mysql 5.5 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &busy_time, - &cpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_fetched, - &rows_updated, - &table_rows_read, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, + &totalConnections, + &concurrentConnections, + &connectedTime, + &busyTime, + &cpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsFetched, + &rowsUpdated, + &tableRowsRead, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, }, nil case 22: // percona return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &busy_time, - &cpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_fetched, - &rows_updated, - &table_rows_read, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, - &total_ssl_connections, + &totalConnections, + &concurrentConnections, + &connectedTime, + &busyTime, + &cpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsFetched, + &rowsUpdated, + &tableRowsRead, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, + &totalSslConnections, }, nil } - return nil, fmt.Errorf("Not Supported - %d columns", l) + return nil, fmt.Errorf("not supported - %d columns", l) } // gatherPerfTableIOWaits can be used to get total count and time @@ -1245,10 +1369,16 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu if err := rows.Scan(&key, &val); err != nil { return err } + key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { - fields[key] = value + value, err := m.parseValueByDatabaseTypeName(val, "BIGINT") + if err != nil { + acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err)) + continue } + + fields[key] = value + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_innodb", fields, tags) @@ -1262,6 +1392,142 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu return nil } +// gatherPerfSummaryPerAccountPerEvent can be used to fetch enabled metrics from +// performance_schema.events_statements_summary_by_account_by_event_name +func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, serv string, acc telegraf.Accumulator) error {
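+ // When perf_summary_events is non-empty, an EVENT_NAME IN (?, ...) filter is + // appended with one placeholder per configured event, so the event names are + // passed as query arguments rather than interpolated into the SQL string.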
+ sqlQuery := perfSummaryPerAccountPerEvent + + var rows *sql.Rows + var err error + + var ( + srcUser string + srcHost string + eventName string + countStar float64 + sumTimerWait float64 + minTimerWait float64 + avgTimerWait float64 + maxTimerWait float64 + sumLockTime float64 + sumErrors float64 + sumWarnings float64 + sumRowsAffected float64 + sumRowsSent float64 + sumRowsExamined float64 + sumCreatedTmpDiskTables float64 + sumCreatedTmpTables float64 + sumSelectFullJoin float64 + sumSelectFullRangeJoin float64 + sumSelectRange float64 + sumSelectRangeCheck float64 + sumSelectScan float64 + sumSortMergePasses float64 + sumSortRange float64 + sumSortRows float64 + sumSortScan float64 + sumNoIndexUsed float64 + sumNoGoodIndexUsed float64 + ) + + var events []interface{} + // if we have perf_summary_events set - select only listed events (adding filter criteria for rows) + if len(m.PerfSummaryEvents) > 0 { + sqlQuery += " WHERE EVENT_NAME IN (" + for i, eventName := range m.PerfSummaryEvents { + if i > 0 { + sqlQuery += ", " + } + sqlQuery += "?" + events = append(events, eventName) + } + sqlQuery += ")" + + rows, err = db.Query(sqlQuery, events...) + } else { + // otherwise no filter, hence, select all rows + rows, err = db.Query(perfSummaryPerAccountPerEvent) + } + + if err != nil { + return err + } + defer rows.Close() + + // parse DSN and save server tag + servtag := getDSNTag(serv) + tags := map[string]string{"server": servtag} + for rows.Next() { + if err := rows.Scan( + &srcUser, + &srcHost, + &eventName, + &countStar, + &sumTimerWait, + &minTimerWait, + &avgTimerWait, + &maxTimerWait, + &sumLockTime, + &sumErrors, + &sumWarnings, + &sumRowsAffected, + &sumRowsSent, + &sumRowsExamined, + &sumCreatedTmpDiskTables, + &sumCreatedTmpTables, + &sumSelectFullJoin, + &sumSelectFullRangeJoin, + &sumSelectRange, + &sumSelectRangeCheck, + &sumSelectScan, + &sumSortMergePasses, + &sumSortRange, + &sumSortRows, + &sumSortScan, + &sumNoIndexUsed, + &sumNoGoodIndexUsed, + ); err != nil { + return err + } + srcUser = strings.ToLower(srcUser) + srcHost = strings.ToLower(srcHost) + + sqlLWTags := copyTags(tags) + sqlLWTags["src_user"] = srcUser + sqlLWTags["src_host"] = srcHost + sqlLWTags["event"] = eventName + sqlLWFields := map[string]interface{}{ + "count_star": countStar, + "sum_timer_wait": sumTimerWait, + "min_timer_wait": minTimerWait, + "avg_timer_wait": avgTimerWait, + "max_timer_wait": maxTimerWait, + "sum_lock_time": sumLockTime, + "sum_errors": sumErrors, + "sum_warnings": sumWarnings, + "sum_rows_affected": sumRowsAffected, + "sum_rows_sent": sumRowsSent, + "sum_rows_examined": sumRowsExamined, + "sum_created_tmp_disk_tables": sumCreatedTmpDiskTables, + "sum_created_tmp_tables": sumCreatedTmpTables, + "sum_select_full_join": sumSelectFullJoin, + "sum_select_full_range_join": sumSelectFullRangeJoin, + "sum_select_range": sumSelectRange, + "sum_select_range_check": sumSelectRangeCheck, + "sum_select_scan": sumSelectScan, + "sum_sort_merge_passes": sumSortMergePasses, + "sum_sort_range": sumSortRange, + "sum_sort_rows": sumSortRows, + "sum_sort_scan": sumSortScan, + "sum_no_index_used": sumNoIndexUsed, + "sum_no_good_index_used": sumNoGoodIndexUsed, + } + acc.AddFields("mysql_perf_acc_event", sqlLWFields, sqlLWTags) + } + + return nil +} + // gatherPerfTableLockWaits can be used to get // the total number and time for SQL and external lock wait events // for each table and operation @@ -1479,8 +1745,8 @@ func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, serv string, acc telegr 
fields["file_events_seconds_total"] = sumTimerWrite / picoSeconds fields["file_events_bytes_totals"] = sumNumBytesWrite acc.AddFields("mysql_perf_schema", fields, writeTags) - } + return nil } @@ -1501,7 +1767,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf defer rows.Close() var ( - schemaName, digest, digest_text string + schemaName, digest, digestText string count, queryTime, errors, warnings float64 rowsAffected, rowsSent, rowsExamined float64 tmpTables, tmpDiskTables float64 @@ -1516,7 +1782,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf for rows.Next() { err = rows.Scan( - &schemaName, &digest, &digest_text, + &schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, @@ -1529,7 +1795,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf } tags["schema"] = schemaName tags["digest"] = digest - tags["digest_text"] = digest_text + tags["digest_text"] = digestText fields := map[string]interface{}{ "events_statements_total": count, @@ -1578,124 +1844,121 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula } for _, database := range dbList { - rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + err := m.gatherSchemaForDB(db, database, servtag, acc) if err != nil { return err } - defer rows.Close() - var ( - tableSchema string - tableName string - tableType string - engine string - version float64 - rowFormat string - tableRows float64 - dataLength float64 - indexLength float64 - dataFree float64 - createOptions string + } + return nil +} + +func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, acc telegraf.Accumulator) error { + rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + if err != nil { + return err + } + defer rows.Close() + + var ( + tableSchema string + tableName string + tableType string + engine string + version float64 + rowFormat string + tableRows float64 + dataLength float64 + indexLength float64 + dataFree float64 + createOptions string + ) + + for rows.Next() { + err = rows.Scan( + &tableSchema, + &tableName, + &tableType, + &engine, + &version, + &rowFormat, + &tableRows, + &dataLength, + &indexLength, + &dataFree, + &createOptions, ) - for rows.Next() { - err = rows.Scan( - &tableSchema, - &tableName, - &tableType, - &engine, - &version, - &rowFormat, - &tableRows, - &dataLength, - &indexLength, - &dataFree, - &createOptions, - ) - if err != nil { - return err - } - tags := map[string]string{"server": servtag} - tags["schema"] = tableSchema - tags["table"] = tableName + if err != nil { + return err + } + tags := map[string]string{"server": servtag} + tags["schema"] = tableSchema + tags["table"] = tableName - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_rows"), - map[string]interface{}{"value": tableRows}, tags) - - dlTags := copyTags(tags) - dlTags["component"] = "data_length" - acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), - map[string]interface{}{"value": dataLength}, dlTags) - - ilTags := copyTags(tags) - ilTags["component"] = "index_length" - acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), - map[string]interface{}{"value": indexLength}, ilTags) - - dfTags := copyTags(tags) - dfTags["component"] = "data_free" - acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), - map[string]interface{}{"value": 
dataFree}, dfTags) - } else { - acc.AddFields("mysql_table_schema", - map[string]interface{}{"rows": tableRows}, tags) + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_rows"), + map[string]interface{}{"value": tableRows}, tags) + + dlTags := copyTags(tags) + dlTags["component"] = "data_length" + acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), + map[string]interface{}{"value": dataLength}, dlTags) + + ilTags := copyTags(tags) + ilTags["component"] = "index_length" + acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), + map[string]interface{}{"value": indexLength}, ilTags) + + dfTags := copyTags(tags) + dfTags["component"] = "data_free" + acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), + map[string]interface{}{"value": dataFree}, dfTags) + } else { + acc.AddFields("mysql_table_schema", + map[string]interface{}{"rows": tableRows}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_length": dataLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_length": dataLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"index_length": indexLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"index_length": indexLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_free": dataFree}, tags) - } + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_free": dataFree}, tags) + } - versionTags := copyTags(tags) - versionTags["type"] = tableType - versionTags["engine"] = engine - versionTags["row_format"] = rowFormat - versionTags["create_options"] = createOptions + versionTags := copyTags(tags) + versionTags["type"] = tableType + versionTags["engine"] = engine + versionTags["row_format"] = rowFormat + versionTags["create_options"] = createOptions - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_version"), - map[string]interface{}{"value": version}, versionTags) - } else { - acc.AddFields("mysql_table_schema_version", - map[string]interface{}{"table_version": version}, versionTags) - } + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_version"), + map[string]interface{}{"value": version}, versionTags) + } else { + acc.AddFields("mysql_table_schema_version", + map[string]interface{}{"table_version": version}, versionTags) } } return nil } -func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { +func (m *Mysql) parseValueByDatabaseTypeName(value sql.RawBytes, databaseTypeName string) (interface{}, error) { if m.MetricVersion < 2 { return v1.ParseValue(value) - } else { - return parseValue(value) - } -} - -// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 -func parseValue(value sql.RawBytes) (interface{}, bool) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { - return 1, true - } - - if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { - return 0, true - } - - if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { - return val, true - } - if val, err := strconv.ParseFloat(string(value), 64); err == nil { - return val, true } - if len(string(value)) > 0 { - return string(value), true + switch databaseTypeName { + case "INT": + return v2.ParseInt(value) + case "BIGINT": + return v2.ParseUint(value) + case "VARCHAR": + return v2.ParseString(value) + default: + m.Log.Debugf("unknown 
database type name %q in parseValueByDatabaseTypeName", databaseTypeName) + return v2.ParseValue(value) } - return nil, false } // findThreadState can be used to find thread state by command and plain state diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index be9c338bf7b0e..868c86f18b9cb 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -1,16 +1,15 @@ package mysql import ( - "database/sql" "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) -func TestMysqlDefaultsToLocal(t *testing.T) { +func TestMysqlDefaultsToLocalIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -23,10 +22,10 @@ func TestMysqlDefaultsToLocal(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) } -func TestMysqlMultipleInstances(t *testing.T) { +func TestMysqlMultipleInstancesIntegration(t *testing.T) { // Invoke Gather() from two separate configurations and // confirm they don't interfere with each other if testing.Short() { @@ -34,25 +33,28 @@ func TestMysqlMultipleInstances(t *testing.T) { } testServer := "root@tcp(127.0.0.1:3306)/?tls=false" m := &Mysql{ - Servers: []string{testServer}, - IntervalSlow: "30s", + Servers: []string{testServer}, + IntervalSlow: "30s", + GatherGlobalVars: true, + MetricVersion: 2, } var acc, acc2 testutil.Accumulator err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) // acc should have global variables - assert.True(t, acc.HasMeasurement("mysql_variables")) + require.True(t, acc.HasMeasurement("mysql_variables")) m2 := &Mysql{ - Servers: []string{testServer}, + Servers: []string{testServer}, + MetricVersion: 2, } err = m2.Gather(&acc2) require.NoError(t, err) - assert.True(t, acc2.HasMeasurement("mysql")) + require.True(t, acc2.HasMeasurement("mysql")) // acc2 should not have global variables - assert.False(t, acc2.HasMeasurement("mysql_variables")) + require.False(t, acc2.HasMeasurement("mysql_variables")) } func TestMysqlMultipleInits(t *testing.T) { @@ -62,16 +64,16 @@ func TestMysqlMultipleInits(t *testing.T) { m2 := &Mysql{} m.InitMysql() - assert.True(t, m.initDone) - assert.False(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.False(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) m2.InitMysql() - assert.True(t, m.initDone) - assert.True(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.True(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) } func TestMysqlGetDSNTag(t *testing.T) { @@ -175,29 +177,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { } } } -func TestParseValue(t *testing.T) { - testCases := []struct { - rawByte sql.RawBytes - output interface{} - boolValue bool - }{ - {sql.RawBytes("123"), int64(123), true}, - {sql.RawBytes("abc"), "abc", true}, - {sql.RawBytes("10.1"), 10.1, true}, - {sql.RawBytes("ON"), 1, true}, - {sql.RawBytes("OFF"), 0, true}, - {sql.RawBytes("NO"), 0, 
true}, - {sql.RawBytes("YES"), 1, true}, - {sql.RawBytes("No"), 0, true}, - {sql.RawBytes("Yes"), 1, true}, - {sql.RawBytes(""), nil, false}, - } - for _, cases := range testCases { - if got, ok := parseValue(cases.rawByte); got != cases.output && ok != cases.boolValue { - t.Errorf("for %s wanted %t, got %t", string(cases.rawByte), cases.output, got) - } - } -} + func TestNewNamespace(t *testing.T) { testCases := []struct { words []string diff --git a/plugins/inputs/mysql/v1/mysql.go b/plugins/inputs/mysql/v1/mysql.go index 6f6062d14f4db..7f4e1a7dcacae 100644 --- a/plugins/inputs/mysql/v1/mysql.go +++ b/plugins/inputs/mysql/v1/mysql.go @@ -182,14 +182,14 @@ var Mappings = []*Mapping{ }, } -func ParseValue(value sql.RawBytes) (float64, bool) { - if bytes.Compare(value, []byte("Yes")) == 0 || bytes.Compare(value, []byte("ON")) == 0 { - return 1, true +func ParseValue(value sql.RawBytes) (float64, error) { + if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) { + return 1, nil } - if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 { - return 0, true + if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) { + return 0, nil } n, err := strconv.ParseFloat(string(value), 64) - return n, err == nil + return n, err } diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index a3ac3e976d6a3..b446890c9baec 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -21,6 +21,14 @@ func ParseInt(value sql.RawBytes) (interface{}, error) { return v, err } +func ParseUint(value sql.RawBytes) (interface{}, error) { + return strconv.ParseUint(string(value), 10, 64) +} + +func ParseFloat(value sql.RawBytes) (interface{}, error) { + return strconv.ParseFloat(string(value), 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -29,6 +37,10 @@ func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { return int64(0), nil } +func ParseString(value sql.RawBytes) (interface{}, error) { + return string(value), nil +} + func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html v := string(value) @@ -47,17 +59,20 @@ func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { } func ParseValue(value sql.RawBytes) (interface{}, error) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { + if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { return 1, nil } - if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { + if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { return 0, nil } if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { return val, nil } + if val, err := strconv.ParseUint(string(value), 10, 64); err == nil { + return val, nil + } if val, err := strconv.ParseFloat(string(value), 64); err == nil { return val, nil } @@ -70,12 +85,33 @@ func ParseValue(value sql.RawBytes) (interface{}, error) { } var GlobalStatusConversions = map[string]ConversionFunc{ - "ssl_ctx_verify_depth": ParseInt, - "ssl_verify_depth": ParseInt, + "innodb_available_undo_logs": ParseUint, + "innodb_buffer_pool_pages_misc": ParseUint, + "innodb_data_pending_fsyncs": ParseUint, + "ssl_ctx_verify_depth": ParseUint, + "ssl_verify_depth": 
ParseUint, + + // see https://galeracluster.com/library/documentation/galera-status-variables.html + "wsrep_local_index": ParseUint, + "wsrep_local_send_queue_avg": ParseFloat, } var GlobalVariableConversions = map[string]ConversionFunc{ - "gtid_mode": ParseGTIDMode, + // see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html + // see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html + "delay_key_write": ParseString, // ON, OFF, ALL + "enforce_gtid_consistency": ParseString, // ON, OFF, WARN + "event_scheduler": ParseString, // YES, NO, DISABLED + "gtid_mode": ParseGTIDMode, + "have_openssl": ParseBoolAsInteger, // alias for have_ssl + "have_ssl": ParseBoolAsInteger, // YES, DISABLED + "have_symlink": ParseBoolAsInteger, // YES, NO, DISABLED + "session_track_gtids": ParseString, + "session_track_transaction_info": ParseString, + "slave_skip_errors": ParseString, + "ssl_fips_mode": ParseString, + "transaction_write_set_extraction": ParseString, + "use_secondary_engine": ParseString, } func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) { diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 47189c18d1576..95083a1e5016f 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -2,6 +2,7 @@ package v2 import ( "database/sql" + "strings" "testing" "github.com/stretchr/testify/require" @@ -19,14 +20,14 @@ func TestConvertGlobalStatus(t *testing.T) { name: "default", key: "ssl_ctx_verify_depth", value: []byte("0"), - expected: int64(0), + expected: uint64(0), expectedErr: nil, }, { name: "overflow int64", key: "ssl_ctx_verify_depth", value: []byte("18446744073709551615"), - expected: int64(9223372036854775807), + expected: uint64(18446744073709551615), expectedErr: nil, }, { @@ -84,3 +85,43 @@ func TestCovertGlobalVariables(t *testing.T) { }) } } + +func TestParseValue(t *testing.T) { + testCases := []struct { + rawByte sql.RawBytes + output interface{} + err string + }{ + {sql.RawBytes("123"), int64(123), ""}, + {sql.RawBytes("abc"), "abc", ""}, + {sql.RawBytes("10.1"), 10.1, ""}, + {sql.RawBytes("ON"), 1, ""}, + {sql.RawBytes("OFF"), 0, ""}, + {sql.RawBytes("NO"), 0, ""}, + {sql.RawBytes("YES"), 1, ""}, + {sql.RawBytes("No"), 0, ""}, + {sql.RawBytes("Yes"), 1, ""}, + {sql.RawBytes("-794"), int64(-794), ""}, + {sql.RawBytes("2147483647"), int64(2147483647), ""}, // max int32 + {sql.RawBytes("2147483648"), int64(2147483648), ""}, // too big for int32 + {sql.RawBytes("9223372036854775807"), int64(9223372036854775807), ""}, // max int64 + {sql.RawBytes("9223372036854775808"), uint64(9223372036854775808), ""}, // too big for int64 + {sql.RawBytes("18446744073709551615"), uint64(18446744073709551615), ""}, // max uint64 + {sql.RawBytes("18446744073709551616"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes(""), nil, "unconvertible value"}, + } + for _, cases := range testCases { + got, err := ParseValue(cases.rawByte) + + if err != nil && cases.err == "" { + t.Errorf("for %q got unexpected error: %q", string(cases.rawByte), err.Error()) + } else if err != nil && !strings.HasPrefix(err.Error(), cases.err) { + t.Errorf("for %q wanted error %q, got %q", string(cases.rawByte), cases.err, err.Error()) + } else if err == nil && cases.err != "" { + t.Errorf("for %q did not get expected error: %s", string(cases.rawByte), cases.err) 
+ } else if got != cases.output { + t.Errorf("for %q wanted %#v (%T), got %#v (%T)", string(cases.rawByte), cases.output, cases.output, got, got) + } + } +} diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 1afb0046dc3a5..c9e99824d4de5 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,24 +1,26 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "path" "time" + gnatsd "github.com/nats-io/nats-server/v2/server" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { Server string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration client *http.Client } @@ -40,28 +42,28 @@ func (n *Nats) Description() string { } func (n *Nats) Gather(acc telegraf.Accumulator) error { - url, err := url.Parse(n.Server) + address, err := url.Parse(n.Server) if err != nil { return err } - url.Path = path.Join(url.Path, "varz") + address.Path = path.Join(address.Path, "varz") if n.client == nil { n.client = n.createHTTPClient() } - resp, err := n.client.Get(url.String()) + resp, err := n.client.Get(address.String()) if err != nil { return err } defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { return err } stats := new(gnatsd.Varz) - err = json.Unmarshal([]byte(bytes), &stats) + err = json.Unmarshal(bytes, &stats) if err != nil { return err } @@ -93,7 +95,7 @@ func (n *Nats) createHTTPClient() *http.Client { transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, } - timeout := n.ResponseTimeout.Duration + timeout := time.Duration(n.ResponseTimeout) if timeout == time.Duration(0) { timeout = 5 * time.Second } diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index 08d08ba760df0..f50ba2cfcf678 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd && !cgo // +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index ece22288ff9af..135951405feda 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats @@ -69,12 +70,17 @@ var sampleVarz = ` func TestMetricsCorrect(t *testing.T) { var acc testutil.Accumulator - srv := newTestNatsServer() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/varz", "Cannot handle request") + + rsp := sampleVarz + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) + })) defer srv.Close() n := &Nats{Server: srv.URL} - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) fields := map[string]interface{}{ "in_msgs": int64(74148556), @@ -97,18 +103,3 @@ func TestMetricsCorrect(t *testing.T) { } acc.AssertContainsTaggedFields(t, "nats", fields, tags) } - -func newTestNatsServer() *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - - switch r.URL.Path { - case "/varz": - rsp = sampleVarz - default: - panic("Cannot handle request") - } - - fmt.Fprintln(w, rsp) - })) -} diff --git 
a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 057c77ee795c4..70c3287d12299 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -264,7 +264,7 @@ func (n *natsConsumer) Stop() { n.clean() } -func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { +func (n *natsConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index 8161ac7b4880a..a8934bd01ee94 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -5,7 +5,7 @@ package neptuneapex import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "math" "net/http" "strconv" @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -51,7 +51,7 @@ type outlet struct { // NeptuneApex implements telegraf.Input. type NeptuneApex struct { Servers []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration httpClient *http.Client } @@ -245,7 +245,7 @@ func findProbe(probe string, probes []probe) int { // returns a time.Time struct. func parseTime(val string, tz float64) (time.Time, error) { // Magic time constant from https://golang.org/pkg/time/#Parse - const TimeLayout = "01/02/2006 15:04:05 -0700" + const timeLayout = "01/02/2006 15:04:05 -0700" // Timezone offset needs to be explicit sign := '+' @@ -256,7 +256,7 @@ func parseTime(val string, tz float64) (time.Time, error) { // Build a time string with the timezone in a format Go can parse. tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) ts := fmt.Sprintf("%s %s", val, tzs) - t, err := time.Parse(TimeLayout, ts) + t, err := time.Parse(timeLayout, ts) if err != nil { return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) } @@ -276,7 +276,7 @@ func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { url, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("unable to read output from %q: %v", url, err) } diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index cefa5fad14662..dd2bbeb3d9227 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -1,22 +1,23 @@ package neptuneapex import ( - "bytes" "context" "net" "net/http" "net/http/httptest" - "reflect" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) - w.Write([]byte("data")) + _, err := w.Write([]byte("data")) + require.NoError(t, err) }) c, destroy := fakeHTTPClient(h) defer destroy() @@ -46,12 +47,9 @@ func TestGather(t *testing.T) { t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator n.Servers = test.servers - n.Gather(&acc) - if len(acc.Errors) != len(test.servers) { - t.Errorf("Number of servers mismatch. 
got=%d, want=%d", - len(acc.Errors), len(test.servers)) - } - + require.NoError(t, n.Gather(&acc)) + require.Lenf(t, acc.Errors, len(test.servers), + "Number of servers mismatch. got=%d, want=%d", len(acc.Errors), len(test.servers)) }) } } @@ -63,33 +61,32 @@ func TestParseXML(t *testing.T) { tests := []struct { name string xmlResponse []byte - wantMetrics []*testutil.Metric + wantMetrics []telegraf.Metric wantAccErr bool wantErr bool }{ { name: "Good test", xmlResponse: []byte(APEX2016), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "type": "controller", "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "AC5:12345", "power_failed": int64(1544814000000000000), "power_restored": int64(1544833875000000000), }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "0", "device_id": "base_Var1", @@ -99,12 +96,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "PF1"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "PF1"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "6", "device_id": "base_email", @@ -114,12 +111,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "8", "device_id": "2_1", @@ -129,16 +126,16 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "state": "AON", "watt": 35.0, "amp": 0.3, }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "18", "device_id": "3_1", @@ -148,15 +145,15 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "state": "TBL", "xstatus": "OK", }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "28", "device_id": "4_9", @@ -166,12 +163,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "32", "device_id": "Cntl_A2", @@ -181,12 +178,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + 
testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "name": "Salt", "type": "probe", @@ -194,20 +191,21 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"value": 30.1}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"value": 30.1}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "name": "Volt_2", "type": "probe", "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"value": 115.0}, - }, + map[string]interface{}{"value": 115.0}, + goodTime, + ), }, }, { @@ -226,21 +224,21 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_restored": int64(1545548137000000000), }, - }, + goodTime, + ), }, }, { @@ -249,21 +247,21 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545548137000000000), }, - }, + goodTime, + ), }, }, { @@ -283,22 +281,22 @@ func TestParseXML(t *testing.T) { o1Wabc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, { @@ -312,22 +310,22 @@ func TestParseXML(t *testing.T) { o1Aabc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, { @@ -340,22 +338,22 @@ func TestParseXML(t *testing.T) { p1abc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, } @@ -364,33 +362,17 @@ func TestParseXML(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { var acc 
testutil.Accumulator - err := n.parseXML(&acc, []byte(test.xmlResponse)) - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } + err := n.parseXML(&acc, test.xmlResponse) if test.wantErr { + require.Error(t, err, "expected error but got nil") return } - if len(acc.Errors) > 0 != test.wantAccErr { - t.Errorf("Accumulator errors. got=%v, want=none", acc.Errors) - } - if len(acc.Metrics) != len(test.wantMetrics) { - t.Fatalf("Invalid number of metrics received. got=%d, want=%d", len(acc.Metrics), len(test.wantMetrics)) - } - for i, m := range acc.Metrics { - if m.Measurement != test.wantMetrics[i].Measurement { - t.Errorf("Metric measurement mismatch at position %d:\ngot=\n%s\nWant=\n%s", i, m.Measurement, test.wantMetrics[i].Measurement) - } - if !reflect.DeepEqual(m.Tags, test.wantMetrics[i].Tags) { - t.Errorf("Metric tags mismatch at position %d:\ngot=\n%v\nwant=\n%v", i, m.Tags, test.wantMetrics[i].Tags) - } - if !reflect.DeepEqual(m.Fields, test.wantMetrics[i].Fields) { - t.Errorf("Metric fields mismatch at position %d:\ngot=\n%#v\nwant=:\n%#v", i, m.Fields, test.wantMetrics[i].Fields) - } - if !m.Time.Equal(test.wantMetrics[i].Time) { - t.Errorf("Metric time mismatch at position %d:\ngot=\n%s\nwant=\n%s", i, m.Time, test.wantMetrics[i].Time) - } - } + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Equalf(t, len(acc.Errors) > 0, test.wantAccErr, + "Accumulator errors. got=%v, want=%t", acc.Errors, test.wantAccErr) + + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), test.wantMetrics) }) } } @@ -424,7 +406,8 @@ func TestSendRequest(t *testing.T) { h := http.HandlerFunc(func( w http.ResponseWriter, r *http.Request) { w.WriteHeader(test.statusCode) - w.Write([]byte("data")) + _, err := w.Write([]byte("data")) + require.NoError(t, err) }) c, destroy := fakeHTTPClient(h) defer destroy() @@ -432,16 +415,14 @@ func TestSendRequest(t *testing.T) { httpClient: c, } resp, err := n.sendRequest("http://abc") - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got nil") return } - if bytes.Compare(resp, []byte("data")) != 0 { - t.Errorf( - "Response data mismatch. got=%q, want=%q", resp, "data") - } + + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Equalf(t, resp, []byte("data"), "Response data mismatch. got=%q, want=%q", resp, "data") }) } } @@ -480,15 +461,14 @@ func TestParseTime(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() res, err := parseTime(test.input, test.timeZone) - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got nil") return } - if !test.wantTime.Equal(res) { - t.Errorf("err mismatch. got=%s, want=%s", res, test.wantTime) - } + + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Truef(t, test.wantTime.Equal(res), "time mismatch. 
got=%q, want=%q", res, test.wantTime) }) } } @@ -524,27 +504,11 @@ func TestFindProbe(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() index := findProbe(test.probeName, fakeProbes) - if index != test.wantIndex { - t.Errorf("probe index mismatch; got=%d, want %d", index, test.wantIndex) - } + require.Equalf(t, index, test.wantIndex, "probe index mismatch; got=%d, want %d", index, test.wantIndex) }) } } -func TestDescription(t *testing.T) { - n := &NeptuneApex{} - if n.Description() == "" { - t.Errorf("Empty description") - } -} - -func TestSampleConfig(t *testing.T) { - n := &NeptuneApex{} - if n.SampleConfig() == "" { - t.Errorf("Empty sample config") - } -} - // This fakeHttpClient creates a server and binds a client to it. // That way, it is possible to control the http // output from within the test without changes to the main code. diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go index f91501860e749..bb1621061ae9b 100644 --- a/plugins/inputs/net/net.go +++ b/plugins/inputs/net/net.go @@ -20,7 +20,7 @@ type NetIOStats struct { Interfaces []string } -func (_ *NetIOStats) Description() string { +func (n *NetIOStats) Description() string { return "Read metrics about network interface usage" } @@ -38,18 +38,18 @@ var netSampleConfig = ` ## ` -func (_ *NetIOStats) SampleConfig() string { +func (n *NetIOStats) SampleConfig() string { return netSampleConfig } -func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { - netio, err := s.ps.NetIO() +func (n *NetIOStats) Gather(acc telegraf.Accumulator) error { + netio, err := n.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %s", err) } - if s.filter == nil { - if s.filter, err = filter.Compile(s.Interfaces); err != nil { + if n.filter == nil { + if n.filter, err = filter.Compile(n.Interfaces); err != nil { return fmt.Errorf("error compiling filter: %s", err) } } @@ -64,17 +64,17 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { } for _, io := range netio { - if len(s.Interfaces) != 0 { + if len(n.Interfaces) != 0 { var found bool - if s.filter.Match(io.Name) { + if n.filter.Match(io.Name) { found = true } if !found { continue } - } else if !s.skipChecks { + } else if !n.skipChecks { iface, ok := interfacesByName[io.Name] if !ok { continue @@ -108,8 +108,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { // Get system wide stats for different network protocols // (ignore these stats if the call fails) - if !s.IgnoreProtocolStats { - netprotos, _ := s.ps.NetProto() + if !n.IgnoreProtocolStats { + netprotos, _ := n.ps.NetProto() fields := make(map[string]interface{}) for _, proto := range netprotos { for stat, value := range proto.Stats { diff --git a/plugins/inputs/net/netstat.go b/plugins/inputs/net/netstat.go index 555b396afd357..150f271a31b53 100644 --- a/plugins/inputs/net/netstat.go +++ b/plugins/inputs/net/netstat.go @@ -13,18 +13,18 @@ type NetStats struct { ps system.PS } -func (_ *NetStats) Description() string { +func (ns *NetStats) Description() string { return "Read TCP metrics such as established, time wait and sockets counts." 
} var tcpstatSampleConfig = "" -func (_ *NetStats) SampleConfig() string { +func (ns *NetStats) SampleConfig() string { return tcpstatSampleConfig } -func (s *NetStats) Gather(acc telegraf.Accumulator) error { - netconns, err := s.ps.NetConnections() +func (ns *NetStats) Gather(acc telegraf.Accumulator) error { + netconns, err := ns.ps.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %s", err) } @@ -35,7 +35,7 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { tags := map[string]string{} for _, netcon := range netconns { if netcon.Type == syscall.SOCK_DGRAM { - counts["UDP"] += 1 + counts["UDP"]++ continue // UDP has no status } c, ok := counts[netcon.Status] diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 023b4405e3609..043a3c44760ed 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -17,17 +17,17 @@ type ResultType uint64 const ( Success ResultType = 0 - Timeout = 1 - ConnectionFailed = 2 - ReadFailed = 3 - StringMismatch = 4 + Timeout ResultType = 1 + ConnectionFailed ResultType = 2 + ReadFailed ResultType = 3 + StringMismatch ResultType = 4 ) // NetResponse struct type NetResponse struct { Address string - Timeout internal.Duration - ReadTimeout internal.Duration + Timeout config.Duration + ReadTimeout config.Duration Send string Expect string Protocol string @@ -73,14 +73,14 @@ func (*NetResponse) SampleConfig() string { // TCPGather will execute if there are TCP tests defined in the configuration. 
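+// Connection, read, and expect-string failures are encoded in the result fields rather than returned; only write and read-deadline errors surface as a non-nil error.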
// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]interface{}) { +func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, error) { // Prepare returns - tags = make(map[string]string) - fields = make(map[string]interface{}) + tags := make(map[string]string) + fields := make(map[string]interface{}) // Start Timer start := time.Now() // Connecting - conn, err := net.DialTimeout("tcp", n.Address, n.Timeout.Duration) + conn, err := net.DialTimeout("tcp", n.Address, time.Duration(n.Timeout)) // Stop timer responseTime := time.Since(start).Seconds() // Handle error @@ -90,20 +90,24 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int } else { setResult(ConnectionFailed, fields, tags, n.Expect) } - return tags, fields + return tags, fields, nil } defer conn.Close() // Send string if needed if n.Send != "" { msg := []byte(n.Send) - conn.Write(msg) + if _, gerr := conn.Write(msg); gerr != nil { + return nil, nil, gerr + } // Stop timer responseTime = time.Since(start).Seconds() } // Read string if needed if n.Expect != "" { // Set read timeout - conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + if gerr := conn.SetReadDeadline(time.Now().Add(time.Duration(n.ReadTimeout))); gerr != nil { + return nil, nil, gerr + } // Prepare reader reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) @@ -116,8 +120,8 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int setResult(ReadFailed, fields, tags, n.Expect) } else { // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(data)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(data) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -128,15 +132,15 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int setResult(Success, fields, tags, n.Expect) } fields["response_time"] = responseTime - return tags, fields + return tags, fields, nil } // UDPGather will execute if there are UDP tests defined in the configuration. 
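+// Address-resolution, connect, and read failures are likewise encoded in the result fields (hence the nolint:nilerr markers below); only write and read-deadline errors return a non-nil error.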
// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]interface{}) { +func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, error) { // Prepare returns - tags = make(map[string]string) - fields = make(map[string]interface{}) + tags := make(map[string]string) + fields := make(map[string]interface{}) // Start Timer start := time.Now() // Resolving @@ -144,22 +148,30 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } // Connecting conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } defer conn.Close() // Send string msg := []byte(n.Send) - conn.Write(msg) + if _, gerr := conn.Write(msg); gerr != nil { + return nil, nil, gerr + } // Read string // Set read timeout - conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + if gerr := conn.SetReadDeadline(time.Now().Add(time.Duration(n.ReadTimeout))); gerr != nil { + return nil, nil, gerr + } // Read buf := make([]byte, 1024) _, _, err = conn.ReadFromUDP(buf) @@ -168,12 +180,14 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // Handle error if err != nil { setResult(ReadFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(buf)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(string(buf)) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -182,7 +196,7 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int fields["response_time"] = responseTime - return tags, fields + return tags, fields, nil } // Gather is called by telegraf when the plugin is executed on its interval. @@ -190,18 +204,18 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // also fill an Accumulator that is supplied. 
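+// One-second default timeouts are applied and the protocol, send, and expect settings are validated before dispatching to TCPGather or UDPGather.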
func (n *NetResponse) Gather(acc telegraf.Accumulator) error { // Set default values - if n.Timeout.Duration == 0 { - n.Timeout.Duration = time.Second + if n.Timeout == 0 { + n.Timeout = config.Duration(time.Second) } - if n.ReadTimeout.Duration == 0 { - n.ReadTimeout.Duration = time.Second + if n.ReadTimeout == 0 { + n.ReadTimeout = config.Duration(time.Second) } // Check send and expected string if n.Protocol == "udp" && n.Send == "" { - return errors.New("Send string cannot be empty") + return errors.New("send string cannot be empty") } if n.Protocol == "udp" && n.Expect == "" { - return errors.New("Expected string cannot be empty") + return errors.New("expected string cannot be empty") } // Prepare host and port host, port, err := net.SplitHostPort(n.Address) @@ -212,22 +226,31 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { n.Address = "localhost:" + port } if port == "" { - return errors.New("Bad port") + return errors.New("bad port") } // Prepare data tags := map[string]string{"server": host, "port": port} var fields map[string]interface{} var returnTags map[string]string + // Gather data - if n.Protocol == "tcp" { - returnTags, fields = n.TCPGather() + switch n.Protocol { + case "tcp": + returnTags, fields, err = n.TCPGather() + if err != nil { + return err + } tags["protocol"] = "tcp" - } else if n.Protocol == "udp" { - returnTags, fields = n.UDPGather() + case "udp": + returnTags, fields, err = n.UDPGather() + if err != nil { + return err + } tags["protocol"] = "udp" - } else { - return errors.New("Bad protocol") + default: + return errors.New("bad protocol") } + // Merge the tags for k, v := range returnTags { tags[k] = v diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index ef4d0714a7a74..34a7992e3ddf1 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -6,27 +6,22 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSample(t *testing.T) { c := &NetResponse{} output := c.SampleConfig() - if output != sampleConfig { - t.Error("Sample config doesn't match") - } + require.Equal(t, output, sampleConfig, "Sample config doesn't match") } func TestDescription(t *testing.T) { c := &NetResponse{} output := c.Description() - if output != description { - t.Error("Description output is not correct") - } + require.Equal(t, output, description, "Description output is not correct") } func TestBadProtocol(t *testing.T) { var acc testutil.Accumulator @@ -36,9 +31,9 @@ func TestBadProtocol(t *testing.T) { Address: ":9999", } // Error - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "Bad protocol", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "bad protocol", err.Error()) } func TestNoPort(t *testing.T) { @@ -47,9 +42,9 @@ func TestNoPort(t *testing.T) { Protocol: "tcp", Address: ":", } - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "Bad port", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "bad port", err.Error()) } func TestAddressOnly(t *testing.T) { @@ -58,9 +53,9 @@ func TestAddressOnly(t *testing.T) { Protocol: "tcp", Address: "127.0.0.1", } - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "address 127.0.0.1: missing port in 
address", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "address 127.0.0.1: missing port in address", err.Error()) } func TestSendExpectStrings(t *testing.T) { @@ -77,12 +72,12 @@ func TestSendExpectStrings(t *testing.T) { Send: "toast", Expect: "", } - err1 := tc.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "Send string cannot be empty", err1.Error()) - err2 := uc.Gather(&acc) - require.Error(t, err2) - assert.Equal(t, "Expected string cannot be empty", err2.Error()) + err := tc.Gather(&acc) + require.Error(t, err) + require.Equal(t, "send string cannot be empty", err.Error()) + err = uc.Gather(&acc) + require.Error(t, err) + require.Equal(t, "expected string cannot be empty", err.Error()) } func TestTCPError(t *testing.T) { @@ -91,10 +86,10 @@ func TestTCPError(t *testing.T) { c := NetResponse{ Protocol: "tcp", Address: ":9999", + Timeout: config.Duration(time.Second * 30), } - // Error - err1 := c.Gather(&acc) - require.NoError(t, err1) + // Gather + require.NoError(t, c.Gather(&acc)) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -118,23 +113,23 @@ func TestTCPOK1(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "tcp", } // Start TCP server wg.Add(1) go TCPServer(t, &wg) - wg.Wait() - // Connect + wg.Wait() // Wait for the server to spin up wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -162,23 +157,24 @@ func TestTCPOK2(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test2", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "tcp", } // Start TCP server wg.Add(1) go TCPServer(t, &wg) wg.Wait() - // Connect wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -208,13 +204,14 @@ func TestUDPError(t *testing.T) { Protocol: "udp", } // Gather - err1 := c.Gather(&acc) + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } // Error - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -240,23 +237,24 @@ func TestUDPOK1(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "udp", } // Start UDP server wg.Add(1) go UDPServer(t, &wg) wg.Wait() - // Connect wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { 
p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -277,25 +275,28 @@ func TestUDPOK1(t *testing.T) { } func UDPServer(t *testing.T, wg *sync.WaitGroup) { + defer wg.Done() udpAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:2004") conn, _ := net.ListenUDP("udp", udpAddr) wg.Done() buf := make([]byte, 1024) _, remoteaddr, _ := conn.ReadFromUDP(buf) - conn.WriteToUDP(buf, remoteaddr) - conn.Close() - wg.Done() + _, err := conn.WriteToUDP(buf, remoteaddr) + require.NoError(t, err) + require.NoError(t, conn.Close()) } func TCPServer(t *testing.T, wg *sync.WaitGroup) { + defer wg.Done() tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") tcpServer, _ := net.ListenTCP("tcp", tcpAddr) wg.Done() conn, _ := tcpServer.AcceptTCP() buf := make([]byte, 1024) - conn.Read(buf) - conn.Write(buf) - conn.CloseWrite() - tcpServer.Close() - wg.Done() + _, err := conn.Read(buf) + require.NoError(t, err) + _, err = conn.Write(buf) + require.NoError(t, err) + require.NoError(t, conn.CloseWrite()) + require.NoError(t, tcpServer.Close()) } diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md new file mode 100644 index 0000000000000..1ed1a08424bbb --- /dev/null +++ b/plugins/inputs/nfsclient/README.md @@ -0,0 +1,181 @@ +# NFS Client Input Plugin + +The NFS Client input plugin collects data from /proc/self/mountstats. By default, only a limited number of general system-level metrics are collected, including basic read/write counts. +If `fullstat` is set, many additional metrics are collected, as detailed below. + +**NOTE** Many of the metrics, even if tagged with a mount point, are really _per-server_. Thus, if you mount these two shares: `nfs01:/vol/foo/bar` and `nfs01:/vol/foo/baz`, there will be two nearly identical entries in /proc/self/mountstats. This is a limitation of the metrics exposed by the kernel, not the Telegraf plugin. + +### Configuration + +```toml +[[inputs.nfsclient]] + ## Read more low-level metrics (optional, defaults to false) + # fullstat = false + + ## List of mounts to explicitly include or exclude (optional) + ## The pattern (Go regexp) is matched against the mount point (not the + ## device being mounted). If include_mounts is set, all mounts are ignored + ## unless present in the list. If a mount is listed in both include_mounts + ## and exclude_mounts, it is excluded. Go regexp patterns can be used. + # include_mounts = [] + # exclude_mounts = [] + + ## List of operations to include or exclude from collecting. This applies + ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts: + ## the default is to collect everything; when include_operations is set, only + ## those OPs are collected; when exclude_operations is set, all are collected + ## except those listed. If include and exclude are set, the OP is excluded. + ## See /proc/self/mountstats for a list of valid operations; note that + ## NFSv3 and NFSv4 have different lists. While it is not possible to + ## have different include/exclude lists for NFSv3/4, unused elements + ## in the list should be okay. It is possible to have different lists + ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas, + ## with their own lists. See "include_mounts" above, and be careful of + ## duplicate metrics. + # include_operations = [] + # exclude_operations = [] +``` +#### Configuration Options +- **fullstat** bool: Collect per-operation type metrics. Defaults to false. 
+- **include_mounts** list(string): gather metrics for only these mounts. Default is to watch all mounts. +- **exclude_mounts** list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. +- **include_operations** list(string): List of specific NFS operations to track. See /proc/self/mountstats (the "per-op statistics" section) for complete lists of valid options for NFSv3 and NFSv4. The default is to gather all metrics, but this is almost certainly *not* what you want (there are 22 operations for NFSv3, and well over 50 for NFSv4). A suggested 'minimal' list of operations to collect for basic usage: `['READ','WRITE','ACCESS','GETATTR','READDIR','LOOKUP']` +- **exclude_operations** list(string): Gather all metrics, except those listed. Excludes take precedence over includes. + +*N.B.* the `include_mounts` and `exclude_mounts` arguments are both applied to the local mount location (e.g. /mnt/NFS), not the server export (e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. + +#### References +1. [nfsiostat](http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=summary) +2. [net/sunrpc/stats.c - Linux source code](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/stats.c) +3. [What is in /proc/self/mountstats for NFS mounts: an introduction](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex) +4. [The xprt: data for NFS mounts in /proc/self/mountstats](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsXprt) + + + +### Metrics + +#### Fields + +- nfsstat + - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) + - ops (integer, count) - The number of operations of this type executed. + - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) + - exe (integer, milliseconds) - The number of milliseconds it took to process the operations. + - rtt (integer, milliseconds) - The round-trip time for operations. + +In addition, enabling `fullstat` will make many more metrics available. + +#### Tags + +- All measurements have the following tags: + - mountpoint - The local mountpoint, for instance: "/var/www" + - serverexport - The full server export, for instance: "nfsserver.example.org:/export" + +- Measurements nfsstat and nfs_ops will also include: + - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. A complete list of operations supported is visible in `/proc/self/mountstats`. + + + +### Additional metrics + +When `fullstat` is true, additional measurements are collected. Tags are the same as above. + +#### NFS Operations + +Most descriptions come from Reference [[3](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex)] and `nfs_iostat.h`. Field order and names are the same as in `/proc/self/mountstats` and the Kernel source. + +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes occasionally. 
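+
+For orientation, a raw per-op line from `/proc/self/mountstats` maps onto the `nfs_ops` fields described below, in order (the values here come from this plugin's bundled test data; kernels older than 5.3 omit the trailing `errors` column):
+
+```
+READ:  600  601    602       603         604         605         606            607
+       ops  trans  timeouts  bytes_sent  bytes_recv  queue_time  response_time  total_time
+```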
+ +- nfs_bytes + - fields: + - normalreadbytes (int, bytes): Bytes read from the server via `read()` + - normalwritebytes (int, bytes): Bytes written to the server via `write()` + - directreadbytes (int, bytes): Bytes read with O_DIRECT set + - directwritebytes (int, bytes): Bytes written with O_DIRECT set + - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) + - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) + - readpages (int, count): Number of pages read + - writepages (int, count): Number of pages written + +- nfs_events (Per-event metrics) + - fields: + - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. + - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. + - datainvalidates (int, count): How many times an inode had its cached data thrown out. + - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. + - vfsopen (int, count): How many times files or directories have been `open()`'d. + - vfslookup (int, count): How many name lookups in directories there have been. + - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") + - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. + - vfsreadpage (int, count): Number of pages read. + - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). + - vfswritepage (int, count): Number of pages written. + - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) + - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") + - vfssetattr (int, count): How many times we've set attributes on inodes. + - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. + - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. + - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). + - vfsrelease (int, count): Number of calls to `close()`. + - congestionwait (int, count): Believed to be unused by the Linux kernel, but it is part of the NFS spec. + - setattrtrunc (int, count): How many times files have had their size truncated. + - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. + - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) + - shortreads (int, count): Number of times the NFS server returned less data than requested. + - shortwrites (int, count): Number of times the NFS server reports it wrote less data than requested. + - delay (int, count): Occurrences of EJUKEBOX ("Jukebox Delay", probably unused) + - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. + - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. + +- nfs_xprt_tcp + - fields: + - bind_count (int, count): Number of _completely new_ mounts to this server (sometimes 0?) + - connect_count (int, count): How many times the client has connected to the server in question + - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. 
+ - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. + - rpcsends (int, count): How many RPC requests this mount has sent to the server. + - rpcreceives (int, count): How many RPC replies this mount has received from the server. + - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. + - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) + - backlogutil (int, count): Cumulative backlog count + +- nfs_xprt_udp + - fields: + - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] + +- nfs_ops + - fields (In all cases, the `operation` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): + - ops (int, count): Total operations of this type. + - trans (int, count): Total transmissions of this type, including retransmissions: `OP_trans - OP_ops = total_retransmissions` (lower is better). + - timeouts (int, count): Number of major timeouts. + - bytes_sent (int, count): Bytes sent, including headers (should be close to on-wire size). + - bytes_recv (int, count): Bytes received, including headers (should also be close to on-wire size). + - queue_time (int, milliseconds): Cumulative time requests of this OP type waited in the queue before being sent. + - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. + - total_time (int, milliseconds): Cumulative total request time for this OP type (roughly queue_time plus response_time). + - errors (int, count): Total number of operations that completed with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + + +### Example Output +For basic metrics showing per-server read and write data: +``` +nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 +nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 + +``` + +With `fullstat=true`, additional measurements are included for `nfs_bytes`, `nfs_events`, and `nfs_xprt_tcp` (and `nfs_xprt_udp` if present). +Additionally, per-OP metrics are collected, with examples for READ, LOOKUP, and NULL shown. +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes periodically. 
+ +``` +nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 +nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 +nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 + +nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0i,timeouts=0i,bytes_sent=0i,bytes_recv=0i,queue_time=0i,response_time=0i,total_time=0i,ops=0i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 +``` + + diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go new file mode 100644 index 0000000000000..543ba759f772c --- /dev/null +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -0,0 +1,504 @@ +package nfsclient + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type NFSClient struct { + Fullstat bool `toml:"fullstat"` + IncludeMounts []string `toml:"include_mounts"` + ExcludeMounts []string `toml:"exclude_mounts"` + IncludeOperations []string `toml:"include_operations"` + ExcludeOperations []string `toml:"exclude_operations"` + Log telegraf.Logger `toml:"-"` + nfs3Ops map[string]bool + nfs4Ops map[string]bool + mountstatsPath string +} + +const sampleConfig = ` + ## Read more low-level metrics (optional, defaults to false) + # fullstat = false + + ## List of mounts to explictly include or exclude (optional) + ## The pattern (Go regexp) is matched against the mount point (not the + ## device being mounted). If include_mounts is set, all mounts are ignored + ## unless present in the list. If a mount is listed in both include_mounts + ## and exclude_mounts, it is excluded. Go regexp patterns can be used. + # include_mounts = [] + # exclude_mounts = [] + + ## List of operations to include or exclude from collecting. This applies + ## only when fullstat=true. Symantics are similar to {include,exclude}_mounts: + ## the default is to collect everything; when include_operations is set, only + ## those OPs are collected; when exclude_operations is set, all are collected + ## except those listed. If include and exclude are set, the OP is excluded. 
+ ## See /proc/self/mountstats for a list of valid operations; note that + ## NFSv3 and NFSv4 have different lists. While it is not possible to + ## have different include/exclude lists for NFSv3/4, unused elements + ## in the list should be okay. It is possible to have different lists + ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas, + ## with their own lists. See "include_mounts" above, and be careful of + ## duplicate metrics. + # include_operations = [] + # exclude_operations = [] +` + +func (n *NFSClient) SampleConfig() string { + return sampleConfig +} + +func (n *NFSClient) Description() string { + return "Read per-mount NFS client metrics from /proc/self/mountstats" +} + +func convertToUint64(line []string) ([]uint64, error) { + /* A "line" of input data (a pre-split array of strings) is + processed one field at a time. Each field is converted to + a uint64 value and appended to an array of return values. + On error, we check for ErrRange and return an error + if found. This situation indicates a pretty major issue in + the /proc/self/mountstats file, and returning faulty data + is worse than no data. Other errors are ignored, and we append + whatever we got in the first place (probably 0). + Yes, this is ugly. */ + + var nline []uint64 + + if len(line) < 2 { + return nline, nil + } + + // Skip the first field; it's handled specially as the "first" variable + for _, l := range line[1:] { + val, err := strconv.ParseUint(l, 10, 64) + if err != nil { + if numError, ok := err.(*strconv.NumError); ok { + if numError.Err == strconv.ErrRange { + return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) + } + } + } + nline = append(nline, val) + } + return nline, nil +} + +func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, acc telegraf.Accumulator) error { + tags := map[string]string{"mountpoint": mountpoint, "serverexport": export} + nline, err := convertToUint64(line) + if err != nil { + return err + } + + if len(nline) == 0 { + n.Log.Warnf("Parsing Stat line with one field: %s\n", line) + return nil + } + + first := strings.Replace(line[0], ":", "", 1) + + var eventsFields = []string{ + "inoderevalidates", + "dentryrevalidates", + "datainvalidates", + "attrinvalidates", + "vfsopen", + "vfslookup", + "vfsaccess", + "vfsupdatepage", + "vfsreadpage", + "vfsreadpages", + "vfswritepage", + "vfswritepages", + "vfsgetdents", + "vfssetattr", + "vfsflush", + "vfsfsync", + "vfslock", + "vfsrelease", + "congestionwait", + "setattrtrunc", + "extendwrite", + "sillyrenames", + "shortreads", + "shortwrites", + "delay", + "pnfsreads", + "pnfswrites", + } + + var bytesFields = []string{ + "normalreadbytes", + "normalwritebytes", + "directreadbytes", + "directwritebytes", + "serverreadbytes", + "serverwritebytes", + "readpages", + "writepages", + } + + var xprtudpFields = []string{ + "bind_count", + "rpcsends", + "rpcreceives", + "badxids", + "inflightsends", + "backlogutil", + } + + var xprttcpFields = []string{ + "bind_count", + "connect_count", + "connect_time", + "idle_time", + "rpcsends", + "rpcreceives", + "badxids", + "inflightsends", + "backlogutil", + } + + var nfsopFields = []string{ + "ops", + "trans", + "timeouts", + "bytes_sent", + "bytes_recv", + "queue_time", + "response_time", + "total_time", + "errors", + } + + var fields = make(map[string]interface{}) + + switch first { + case "READ", "WRITE": + fields["ops"] = nline[0] + fields["retrans"] = nline[1] - nline[0] + fields["bytes"] = nline[3] + 
nline[4] + fields["rtt"] = nline[6] + fields["exe"] = nline[7] + tags["operation"] = first + acc.AddFields("nfsstat", fields, tags) + } + + if n.Fullstat { + switch first { + case "events": + if len(nline) >= len(eventsFields) { + for i, t := range eventsFields { + fields[t] = nline[i] + } + acc.AddFields("nfs_events", fields, tags) + } + + case "bytes": + if len(nline) >= len(bytesFields) { + for i, t := range bytesFields { + fields[t] = nline[i] + } + acc.AddFields("nfs_bytes", fields, tags) + } + + case "xprt": + if len(line) > 1 { + switch line[1] { + case "tcp": + if len(nline) >= len(xprttcpFields)+2 { + for i, t := range xprttcpFields { + fields[t] = nline[i+2] + } + acc.AddFields("nfs_xprt_tcp", fields, tags) + } + case "udp": + if len(nline) >= len(xprtudpFields)+2 { + for i, t := range xprtudpFields { + fields[t] = nline[i+2] + } + acc.AddFields("nfs_xprt_udp", fields, tags) + } + } + } + } + + if (version == "3" && n.nfs3Ops[first]) || (version == "4" && n.nfs4Ops[first]) { + tags["operation"] = first + if len(nline) <= len(nfsopFields) { + for i, t := range nline { + fields[nfsopFields[i]] = t + } + acc.AddFields("nfs_ops", fields, tags) + } + } + } + + return nil +} + +func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) error { + var mount string + var version string + var export string + var skip bool + + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + lineLength := len(line) + + if lineLength == 0 { + continue + } + + skip = false + + // This denotes a new mount has been found, so set + // mount and export, and stop skipping (for now) + if lineLength > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) { + mount = line[4] + export = line[1] + } else if lineLength > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) { + version = strings.Split(line[5], "/")[1] + } + + if mount == "" { + continue + } + + if len(n.IncludeMounts) > 0 { + skip = true + for _, RE := range n.IncludeMounts { + matched, _ := regexp.MatchString(RE, mount) + if matched { + skip = false + break + } + } + } + + if !skip && len(n.ExcludeMounts) > 0 { + for _, RE := range n.ExcludeMounts { + matched, _ := regexp.MatchString(RE, mount) + if matched { + skip = true + break + } + } + } + + if !skip { + err := n.parseStat(mount, export, version, line, acc) + if err != nil { + return fmt.Errorf("could not parseStat: %w", err) + } + } + } + + return nil +} + +func (n *NFSClient) getMountStatsPath() string { + path := "/proc/self/mountstats" + if os.Getenv("MOUNT_PROC") != "" { + path = os.Getenv("MOUNT_PROC") + } + n.Log.Debugf("using [%s] for mountstats", path) + return path +} + +func (n *NFSClient) Gather(acc telegraf.Accumulator) error { + file, err := os.Open(n.mountstatsPath) + if err != nil { + n.Log.Errorf("Failed opening the [%s] file: %s", n.mountstatsPath, err) + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if err := n.processText(scanner, acc); err != nil { + return err + } + + if err := scanner.Err(); err != nil { + n.Log.Errorf("%s", err) + return err + } + + return nil +} + +func (n *NFSClient) Init() error { + var nfs3Fields = []string{ + "NULL", + "GETATTR", + "SETATTR", + "LOOKUP", + "ACCESS", + "READLINK", + "READ", + "WRITE", + "CREATE", + "MKDIR", + "SYMLINK", + "MKNOD", + "REMOVE", + "RMDIR", + "RENAME", + "LINK", + "READDIR", + "READDIRPLUS", + "FSSTAT", + "FSINFO", + "PATHCONF", + "COMMIT", + } + + var nfs4Fields = []string{ + "NULL", + "READ", + 
"WRITE", + "COMMIT", + "OPEN", + "OPEN_CONFIRM", + "OPEN_NOATTR", + "OPEN_DOWNGRADE", + "CLOSE", + "SETATTR", + "FSINFO", + "RENEW", + "SETCLIENTID", + "SETCLIENTID_CONFIRM", + "LOCK", + "LOCKT", + "LOCKU", + "ACCESS", + "GETATTR", + "LOOKUP", + "LOOKUP_ROOT", + "REMOVE", + "RENAME", + "LINK", + "SYMLINK", + "CREATE", + "PATHCONF", + "STATFS", + "READLINK", + "READDIR", + "SERVER_CAPS", + "DELEGRETURN", + "GETACL", + "SETACL", + "FS_LOCATIONS", + "RELEASE_LOCKOWNER", + "SECINFO", + "FSID_PRESENT", + "EXCHANGE_ID", + "CREATE_SESSION", + "DESTROY_SESSION", + "SEQUENCE", + "GET_LEASE_TIME", + "RECLAIM_COMPLETE", + "LAYOUTGET", + "GETDEVICEINFO", + "LAYOUTCOMMIT", + "LAYOUTRETURN", + "SECINFO_NO_NAME", + "TEST_STATEID", + "FREE_STATEID", + "GETDEVICELIST", + "BIND_CONN_TO_SESSION", + "DESTROY_CLIENTID", + "SEEK", + "ALLOCATE", + "DEALLOCATE", + "LAYOUTSTATS", + "CLONE", + "COPY", + "OFFLOAD_CANCEL", + "LOOKUPP", + "LAYOUTERROR", + "COPY_NOTIFY", + "GETXATTR", + "SETXATTR", + "LISTXATTRS", + "REMOVEXATTR", + } + + nfs3Ops := make(map[string]bool) + nfs4Ops := make(map[string]bool) + + n.mountstatsPath = n.getMountStatsPath() + + if len(n.IncludeOperations) == 0 { + for _, Op := range nfs3Fields { + nfs3Ops[Op] = true + } + for _, Op := range nfs4Fields { + nfs4Ops[Op] = true + } + } else { + for _, Op := range n.IncludeOperations { + nfs3Ops[Op] = true + } + for _, Op := range n.IncludeOperations { + nfs4Ops[Op] = true + } + } + + if len(n.ExcludeOperations) > 0 { + for _, Op := range n.ExcludeOperations { + if nfs3Ops[Op] { + delete(nfs3Ops, Op) + } + if nfs4Ops[Op] { + delete(nfs4Ops, Op) + } + } + } + + n.nfs3Ops = nfs3Ops + n.nfs4Ops = nfs4Ops + + if len(n.IncludeMounts) > 0 { + n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) + } else { + n.Log.Debugf("Including all mounts.") + } + + if len(n.ExcludeMounts) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) + } else { + n.Log.Debugf("Not excluding any mounts.") + } + + if len(n.IncludeOperations) > 0 { + n.Log.Debugf("Including these operations: %v", n.IncludeOperations) + } else { + n.Log.Debugf("Including all operations.") + } + + if len(n.ExcludeOperations) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) + } else { + n.Log.Debugf("Not excluding any operations.") + } + + return nil +} + +func init() { + inputs.Add("nfsclient", func() telegraf.Input { + return &NFSClient{} + }) +} diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go new file mode 100644 index 0000000000000..961c0f34c8d75 --- /dev/null +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -0,0 +1,205 @@ +package nfsclient + +import ( + "bufio" + "os" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func getMountStatsPath() string { + path := "./testdata/mountstats" + if os.Getenv("MOUNT_PROC") != "" { + path = os.Getenv("MOUNT_PROC") + } + + return path +} + +func TestNFSClientParsev3(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false} + data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507") + err := nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(500), + "trans": uint64(501), + "timeouts": 
uint64(502), + "bytes_sent": uint64(503), + "bytes_recv": uint64(504), + "queue_time": uint64(505), + "response_time": uint64(506), + "total_time": uint64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientParsev4(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} + data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507") + err := nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(500), + "trans": uint64(501), + "timeouts": uint64(502), + "bytes_sent": uint64(503), + "bytes_recv": uint64(504), + "queue_time": uint64(505), + "response_time": uint64(506), + "total_time": uint64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientParseLargeValue(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"SETCLIENTID": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"SETCLIENTID": true, "GETATTR": false} + data := strings.Fields(" SETCLIENTID: 218 216 0 53568 12960 18446744073709531008 134 197") + err := nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(218), + "trans": uint64(216), + "timeouts": uint64(0), + "bytes_sent": uint64(53568), + "bytes_recv": uint64(12960), + "queue_time": uint64(18446744073709531008), + "response_time": uint64(134), + "total_time": uint64(197), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientProcessStat(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.Fullstat = false + + file, _ := os.Open(getMountStatsPath()) + defer file.Close() + + scanner := bufio.NewScanner(file) + + err := nfsclient.processText(scanner, &acc) + require.NoError(t, err) + + fieldsReadstat := map[string]interface{}{ + "ops": uint64(600), + "retrans": uint64(1), + "bytes": uint64(1207), + "rtt": uint64(606), + "exe": uint64(607), + } + + readTags := map[string]string{ + "serverexport": "1.2.3.4:/storage/NFS", + "mountpoint": "/A", + "operation": "READ", + } + + acc.AssertContainsTaggedFields(t, "nfsstat", fieldsReadstat, readTags) + + fieldsWritestat := map[string]interface{}{ + "ops": uint64(700), + "retrans": uint64(1), + "bytes": uint64(1407), + "rtt": uint64(706), + "exe": uint64(707), + } + + writeTags := map[string]string{ + "serverexport": "1.2.3.4:/storage/NFS", + "mountpoint": "/A", + "operation": "WRITE", + } + acc.AssertContainsTaggedFields(t, "nfsstat", fieldsWritestat, writeTags) +} + +func TestNFSClientProcessFull(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.Fullstat = true + + file, _ := os.Open(getMountStatsPath()) + defer file.Close() + + scanner := bufio.NewScanner(file) + + err := nfsclient.processText(scanner, &acc) + require.NoError(t, err) + + fieldsEvents := map[string]interface{}{ + "inoderevalidates": uint64(301736), + "dentryrevalidates": uint64(22838), + "datainvalidates": uint64(410979), + "attrinvalidates": uint64(26188427), + "vfsopen": uint64(27525), + "vfslookup": uint64(9140), + "vfsaccess": uint64(114420), + "vfsupdatepage": uint64(30785253), + "vfsreadpage": uint64(5308856), + "vfsreadpages": 
uint64(5364858), + "vfswritepage": uint64(30784819), + "vfswritepages": uint64(79832668), + "vfsgetdents": uint64(170), + "vfssetattr": uint64(64), + "vfsflush": uint64(18194), + "vfsfsync": uint64(29294718), + "vfslock": uint64(0), + "vfsrelease": uint64(18279), + "congestionwait": uint64(0), + "setattrtrunc": uint64(2), + "extendwrite": uint64(785551), + "sillyrenames": uint64(0), + "shortreads": uint64(0), + "shortwrites": uint64(0), + "delay": uint64(0), + "pnfsreads": uint64(0), + "pnfswrites": uint64(0), + } + fieldsBytes := map[string]interface{}{ + "normalreadbytes": uint64(204440464584), + "normalwritebytes": uint64(110857586443), + "directreadbytes": uint64(783170354688), + "directwritebytes": uint64(296174954496), + "serverreadbytes": uint64(1134399088816), + "serverwritebytes": uint64(407107155723), + "readpages": uint64(85749323), + "writepages": uint64(30784819), + } + fieldsXprtTCP := map[string]interface{}{ + "bind_count": uint64(1), + "connect_count": uint64(1), + "connect_time": uint64(0), + "idle_time": uint64(0), + "rpcsends": uint64(96172963), + "rpcreceives": uint64(96172963), + "badxids": uint64(0), + "inflightsends": uint64(620878754), + "backlogutil": uint64(0), + } + + acc.AssertContainsFields(t, "nfs_events", fieldsEvents) + acc.AssertContainsFields(t, "nfs_bytes", fieldsBytes) + acc.AssertContainsFields(t, "nfs_xprt_tcp", fieldsXprtTCP) +} diff --git a/plugins/inputs/nfsclient/testdata/mountstats b/plugins/inputs/nfsclient/testdata/mountstats new file mode 100644 index 0000000000000..86651d20d26fa --- /dev/null +++ b/plugins/inputs/nfsclient/testdata/mountstats @@ -0,0 +1,231 @@ +device rootfs mounted on / with fstype rootfs +device proc mounted on /proc with fstype proc +device sysfs mounted on /sys with fstype sysfs +device devtmpfs mounted on /dev with fstype devtmpfs +device devpts mounted on /dev/pts with fstype devpts +device tmpfs mounted on /dev/shm with fstype tmpfs +device /dev/loop0 mounted on /dev/.initramfs/live with fstype iso9660 +device /dev/loop6 mounted on / with fstype ext4 +device /proc/bus/usb mounted on /proc/bus/usb with fstype usbfs +device none mounted on /proc/sys/fs/binfmt_misc with fstype binfmt_misc +device /tmp mounted on /tmp with fstype tmpfs +device /home mounted on /home with fstype tmpfs +device /var mounted on /var with fstype tmpfs +device /etc mounted on /etc with fstype tmpfs +device /dev/ram1 mounted on /root with fstype ext2 +device cgroup mounted on /cgroup/cpuset with fstype cgroup +device cgroup mounted on /cgroup/cpu with fstype cgroup +device cgroup mounted on /cgroup/cpuacct with fstype cgroup +device cgroup mounted on /cgroup/memory with fstype cgroup +device cgroup mounted on /cgroup/devices with fstype cgroup +device cgroup mounted on /cgroup/freezer with fstype cgroup +device cgroup mounted on /cgroup/net_cls with fstype cgroup +device cgroup mounted on /cgroup/blkio with fstype cgroup +device sunrpc mounted on /var/lib/nfs/rpc_pipefs with fstype rpc_pipefs +device /etc/auto.misc mounted on /misc with fstype autofs +device -hosts mounted on /net with fstype autofs +device 1.2.3.4:/storage/NFS mounted on /A with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=32768,wsize=32768,namlen=255,acregmin=60,acregmax=60,acdirmin=60,acdirmax=60,hard,nolock,noacl,nordirplus,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=1.2.3.4,mountvers=3,mountport=49193,mountproto=tcp,local_lock=all + age: 1136770 + caps: caps=0x3fe6,wtmult=512,dtsize=8192,bsize=0,namlen=255 + sec: flavor=1,pseudoflavor=1 + events: 301736 22838 410979 
26188427 27525 9140 114420 30785253 5308856 5364858 30784819 79832668 170 64 18194 29294718 0 18279 0 2 785551 0 0 0 0 0 0 + bytes: 204440464584 110857586443 783170354688 296174954496 1134399088816 407107155723 85749323 30784819 + RPC iostats version: 1.0 p/v: 100003/3 (nfs) + xprt: tcp 733 1 1 0 0 96172963 96172963 0 620878754 0 690 196347132 524706275 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + GETATTR: 100 101 102 103 104 105 106 107 + SETATTR: 200 201 202 203 204 205 206 207 + LOOKUP: 300 301 302 303 304 305 306 307 + ACCESS: 400 401 402 403 404 405 406 407 + READLINK: 500 501 502 503 504 505 506 507 + READ: 600 601 602 603 604 605 606 607 + WRITE: 700 701 702 703 704 705 706 707 + CREATE: 800 801 802 803 804 805 806 807 + MKDIR: 900 901 902 903 904 905 906 907 + SYMLINK: 1000 1001 1002 1003 1004 1005 1006 1007 + MKNOD: 1100 1101 1102 1103 1104 1105 1106 1107 + REMOVE: 1200 1201 1202 1203 1204 1205 1206 1207 + RMDIR: 1300 1301 1302 1303 1304 1305 1306 1307 + RENAME: 1400 1401 1402 1403 1404 1405 1406 1407 + LINK: 1500 1501 1502 1503 1504 1505 1506 1507 + READDIR: 1600 1601 1602 1603 1604 1605 1606 1607 + READDIRPLUS: 1700 1701 1702 1703 1704 1705 1706 1707 + FSSTAT: 1800 1801 1802 1803 1804 1805 1806 1807 + FSINFO: 1900 1901 1902 1903 1904 1905 1906 1907 + PATHCONF: 2000 2001 2002 2003 2004 2005 2006 2007 + COMMIT: 2100 2101 2102 2103 2104 2105 2106 2107 + +device 2.2.2.2:/nfsdata/ mounted on /B with fstype nfs4 statvers=1.1 + opts: rw,vers=4,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=3.3.3.3,minorversion=0,local_lock=none + age: 19 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,acl=0x0 + sec: flavor=1,pseudoflavor=1 + events: 0 168232 0 0 0 10095 217808 0 2 9797 0 9739 0 0 19739 19739 0 19739 0 0 0 0 0 0 0 0 0 + bytes: 1612840960 0 0 0 627536112 0 158076 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 737 0 1 0 0 69698 69697 0 81817 0 2 1082 12119 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 9797 9797 0 1000 2000 71 7953 8200 + WRITE: 0 0 0 0 0 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 + OPEN: 19740 19740 0 4737600 7343280 505 3449 4172 + OPEN_CONFIRM: 10211 10211 0 1552072 694348 74 836 1008 + OPEN_NOATTR: 0 0 0 0 0 0 0 0 + OPEN_DOWNGRADE: 0 0 0 0 0 0 0 0 + CLOSE: 19739 19739 0 3316152 2605548 334 3045 3620 + SETATTR: 0 0 0 0 0 0 0 0 + FSINFO: 1 1 0 132 108 0 0 0 + RENEW: 0 0 0 0 0 0 0 0 + SETCLIENTID: 0 0 0 0 0 0 0 0 + SETCLIENTID_CONFIRM: 0 0 0 0 0 0 0 0 + LOCK: 0 0 0 0 0 0 0 0 + LOCKT: 0 0 0 0 0 0 0 0 + LOCKU: 0 0 0 0 0 0 0 0 + ACCESS: 96 96 0 14584 19584 0 8 10 + GETATTR: 1 1 0 132 188 0 0 0 + LOOKUP: 10095 10095 0 1655576 2382420 36 898 1072 + LOOKUP_ROOT: 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 + PATHCONF: 1 1 0 128 72 0 0 0 + STATFS: 0 0 0 0 0 0 0 0 + READLINK: 0 0 0 0 0 0 0 0 + READDIR: 0 0 0 0 0 0 0 0 + SERVER_CAPS: 2 2 0 256 176 0 0 0 + DELEGRETURN: 0 0 0 0 0 0 0 0 + GETACL: 0 0 0 0 0 0 0 0 + SETACL: 0 0 0 0 0 0 0 0 + FS_LOCATIONS: 0 0 0 0 0 0 0 0 + RELEASE_LOCKOWNER: 0 0 0 0 0 0 0 0 + SECINFO: 0 0 0 0 0 0 0 0 + EXCHANGE_ID: 0 0 0 0 0 0 0 0 + CREATE_SESSION: 0 0 0 0 0 0 0 0 + DESTROY_SESSION: 500 501 502 503 504 505 506 507 + SEQUENCE: 0 0 0 0 0 0 0 0 + GET_LEASE_TIME: 0 0 0 0 0 0 0 0 + RECLAIM_COMPLETE: 0 0 0 0 0 0 0 0 + LAYOUTGET: 0 0 0 0 0 0 0 0 + GETDEVICEINFO: 0 0 0 0 0 0 0 0 + LAYOUTCOMMIT: 0 0 0
0 0 0 0 0 + +device nfsserver1:/vol/export1/bread_recipes mounted on /C with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=5.4.3.2,mountvers=3,mountport=635,mountproto=udp,local_lock=none + age: 1084700 + caps: caps=0x3fc7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + sec: flavor=1,pseudoflavor=1 + events: 145712 48345501 0 2476 804 1337 49359047 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + bytes: 0 0 0 0 0 0 0 0 + RPC iostats version: 1.0 p/v: 100003/3 (nfs) + xprt: tcp 871 1 1 0 0 181124336 181124308 28 1971647851 0 1100 807885669 90279840 + per-op statistics + NULL: 1 2 0 44 24 0 0 0 + GETATTR: 145712 145712 0 22994472 16319744 532 107480 109969 + SETATTR: 0 0 0 0 0 0 0 0 + LOOKUP: 2553 2553 0 385932 476148 9 1695 1739 + ACCESS: 596338 596338 0 79281020 71560560 2375 228286 237993 + READLINK: 0 0 0 0 0 0 0 0 + READ: 0 0 0 0 0 0 0 0 + WRITE: 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 + MKDIR: 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 + MKNOD: 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 + RMDIR: 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 + READDIR: 0 0 0 0 0 0 0 0 + READDIRPLUS: 0 0 0 0 0 0 0 0 + FSSTAT: 1698 1698 0 250080 285264 6 929 951 + FSINFO: 34 34 0 4352 5576 0 5 5 + PATHCONF: 1 1 0 128 140 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 + +device nfsserver2:/tank/os2warp mounted on /D with fstype nfs4 statvers=1.1 + opts: rw,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=10.66.88.239,local_lock=none + age: 2 + impl_id: name='',domain='',date='0,0' + caps: caps=0xffbfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0x40f9be3e,bm2=0x28803,acl=0x0,sessions,pnfs=not configured,lease_time=90,lease_expired=0 + sec: flavor=1,pseudoflavor=1 + events: 1 112 0 0 1 3 117 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + bytes: 0 0 0 0 0 0 0 0 + RPC iostats version: 1.1 p/v: 100003/4 (nfs) + xprt: tcp 763 0 2 0 2 39 39 0 42 0 2 0 3 + per-op statistics + NULL: 1 1 0 44 24 0 0 1 0 + READ: 0 0 0 0 0 0 0 0 0 + WRITE: 0 0 0 0 0 0 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 0 + OPEN: 0 0 0 0 0 0 0 0 0 + OPEN_CONFIRM: 0 0 0 0 0 0 0 0 0 + OPEN_NOATTR: 0 0 0 0 0 0 0 0 0 + OPEN_DOWNGRADE: 0 0 0 0 0 0 0 0 0 + CLOSE: 0 0 0 0 0 0 0 0 0 + SETATTR: 0 0 0 0 0 0 0 0 0 + FSINFO: 1 1 0 168 164 0 0 0 0 + RENEW: 0 0 0 0 0 0 0 0 0 + SETCLIENTID: 0 0 0 0 0 0 0 0 0 + SETCLIENTID_CONFIRM: 0 0 0 0 0 0 0 0 0 + LOCK: 0 0 0 0 0 0 0 0 0 + LOCKT: 0 0 0 0 0 0 0 0 0 + LOCKU: 0 0 0 0 0 0 0 0 0 + ACCESS: 3 3 0 600 504 0 1 1 0 + GETATTR: 2 2 0 364 480 0 1 1 0 + LOOKUP: 3 3 0 628 484 0 1 1 2 + LOOKUP_ROOT: 0 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 0 + PATHCONF: 1 1 0 160 116 0 0 0 0 + STATFS: 1 1 0 164 160 0 0 0 0 + READLINK: 0 0 0 0 0 0 0 0 0 + READDIR: 1 1 0 224 11968 0 1 1 0 + SERVER_CAPS: 2 2 0 336 328 0 1 1 0 + DELEGRETURN: 0 0 0 0 0 0 0 0 0 + GETACL: 0 0 0 0 0 0 0 0 0 + SETACL: 0 0 0 0 0 0 0 0 0 + FS_LOCATIONS: 0 0 0 0 0 0 0 0 0 + RELEASE_LOCKOWNER: 0 0 0 0 0 0 0 0 0 + SECINFO: 0 0 0 0 0 0 0 0 0 + FSID_PRESENT: 0 0 0 0 0 0 0 0 0 + EXCHANGE_ID: 2 2 0 480 200 0 2 2 0 + CREATE_SESSION: 1 1 0 200 124 0 0 0 0 + DESTROY_SESSION: 0 0 0 0 0 0 0 0 0 + SEQUENCE: 0 0 0 0 0 0 0 0 0 + GET_LEASE_TIME: 0 0 0 0 0 0 0 0 0 + RECLAIM_COMPLETE: 1 1 0 128 88 0 107 107 0 + LAYOUTGET: 0 0 0 0 0 0 0 
0 0 + GETDEVICEINFO: 0 0 0 0 0 0 0 0 0 + LAYOUTCOMMIT: 0 0 0 0 0 0 0 0 0 + LAYOUTRETURN: 0 0 0 0 0 0 0 0 0 + SECINFO_NO_NAME: 0 0 0 0 0 0 0 0 0 + TEST_STATEID: 0 0 0 0 0 0 0 0 0 + FREE_STATEID: 0 0 0 0 0 0 0 0 0 + GETDEVICELIST: 0 0 0 0 0 0 0 0 0 + BIND_CONN_TO_SESSION: 0 0 0 0 0 0 0 0 0 + DESTROY_CLIENTID: 0 0 0 0 0 0 0 0 0 + SEEK: 0 0 0 0 0 0 0 0 0 + ALLOCATE: 0 0 0 0 0 0 0 0 0 + DEALLOCATE: 0 0 0 0 0 0 0 0 0 + LAYOUTSTATS: 0 0 0 0 0 0 0 0 0 + CLONE: 0 0 0 0 0 0 0 0 0 + COPY: 0 0 0 0 0 0 0 0 0 + OFFLOAD_CANCEL: 0 0 0 0 0 0 0 0 0 + LOOKUPP: 0 0 0 0 0 0 0 0 0 + LAYOUTERROR: 0 0 0 0 0 0 0 0 0 + COPY_NOTIFY: 0 0 0 0 0 0 0 0 0 + GETXATTR: 0 0 0 0 0 0 0 0 0 + SETXATTR: 0 0 0 0 0 0 0 0 0 + LISTXATTRS: 0 0 0 0 0 0 0 0 0 + REMOVEXATTR: 0 0 0 0 0 0 0 0 0 + LAYOUTRETURN: 0 0 0 0 0 0 0 0 diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 4834137542039..5e15022708682 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -12,14 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type Nginx struct { Urls []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig // HTTP client @@ -55,7 +55,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { // Create an HTTP client that is re-used for each // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -72,7 +72,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -80,27 +80,27 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Nginx) createHttpClient() (*http.Client, error) { +func (n *Nginx) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } -func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *Nginx) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index 7eb9e90b653ef..5a947e7e202e0 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,9 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const nginxSampleResponse = ` @@ -33,7 +33,7 @@ func TestNginxTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } @@ -46,10 +46,11 @@ func 
TestNginxGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/tengine_status" { rsp = tengineSampleResponse } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -61,16 +62,13 @@ func TestNginxGeneratesMetrics(t *testing.T) { Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)}, } - var acc_nginx testutil.Accumulator - var acc_tengine testutil.Accumulator + var accNginx testutil.Accumulator + var accTengine testutil.Accumulator - err_nginx := acc_nginx.GatherError(n.Gather) - err_tengine := acc_tengine.GatherError(nt.Gather) + require.NoError(t, accNginx.GatherError(n.Gather)) + require.NoError(t, accTengine.GatherError(nt.Gather)) - require.NoError(t, err_nginx) - require.NoError(t, err_tengine) - - fields_nginx := map[string]interface{}{ + fieldsNginx := map[string]interface{}{ "active": uint64(585), "accepts": uint64(85340), "handled": uint64(85340), @@ -80,7 +78,7 @@ func TestNginxGeneratesMetrics(t *testing.T) { "waiting": uint64(446), } - fields_tengine := map[string]interface{}{ + fieldsTengine := map[string]interface{}{ "active": uint64(403), "accepts": uint64(853), "handled": uint64(8533), @@ -91,9 +89,7 @@ func TestNginxGeneratesMetrics(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { @@ -108,6 +104,6 @@ func TestNginxGeneratesMetrics(t *testing.T) { } tags := map[string]string{"server": host, "port": port} - acc_nginx.AssertContainsTaggedFields(t, "nginx", fields_nginx, tags) - acc_tengine.AssertContainsTaggedFields(t, "nginx", fields_tengine, tags) + accNginx.AssertContainsTaggedFields(t, "nginx", fieldsNginx, tags) + accTengine.AssertContainsTaggedFields(t, "nginx", fieldsTengine, tags) } diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 5b0fb2596ebf8..32a8516986f64 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -13,14 +13,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxPlus struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -56,7 +56,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -73,7 +73,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -81,9 +81,9 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NginxPlus) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 +func (n *NginxPlus) createHTTPClient() (*http.Client, error) { + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ 
-95,13 +95,13 @@ func (n *NginxPlus) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } -func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *NginxPlus) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Get(addr.String()) if err != nil { @@ -114,7 +114,7 @@ func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - return gatherStatusUrl(bufio.NewReader(resp.Body), getTags(addr), acc) + return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc) default: return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) } @@ -283,7 +283,7 @@ type Status struct { } `json:"stream"` } -func gatherStatusUrl(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { +func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) status := &Status{} if err := dec.Decode(status); err != nil { @@ -318,7 +318,6 @@ func (s *Status) gatherProcessesMetrics(tags map[string]string, acc telegraf.Acc }, tags, ) - } func (s *Status) gatherConnectionsMetrics(tags map[string]string, acc telegraf.Accumulator) { diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go index 6e9a8c4d97c3e..36fe5a2dce8f6 100644 --- a/plugins/inputs/nginx_plus/nginx_plus_test.go +++ b/plugins/inputs/nginx_plus/nginx_plus_test.go @@ -253,14 +253,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -270,14 +269,11 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - err_nginx := n.Gather(&acc) - - require.NoError(t, err_nginx) + errNginx := n.Gather(&acc) + require.NoError(t, errNginx) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { @@ -409,5 +405,4 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { "upstream_address": "1.2.3.123:80", "id": "0", }) - } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index 8ec1ea0f7725f..09fe3fca3cb01 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -8,15 +8,15 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) -type NginxPlusApi struct { - Urls []string `toml:"urls"` - ApiVersion int64 `toml:"api_version"` - ResponseTimeout internal.Duration `toml:"response_timeout"` +type NginxPlusAPI struct { + Urls 
[]string `toml:"urls"` + APIVersion int64 `toml:"api_version"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -24,7 +24,7 @@ type NginxPlusApi struct { const ( // Default settings - defaultApiVersion = 3 + defaultAPIVersion = 3 // Paths processesPath = "processes" @@ -61,26 +61,26 @@ var sampleConfig = ` # insecure_skip_verify = false ` -func (n *NginxPlusApi) SampleConfig() string { +func (n *NginxPlusAPI) SampleConfig() string { return sampleConfig } -func (n *NginxPlusApi) Description() string { +func (n *NginxPlusAPI) Description() string { return "Read Nginx Plus Api documentation" } -func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { +func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup // Create an HTTP client that is re-used for each // collection interval - if n.ApiVersion == 0 { - n.ApiVersion = defaultApiVersion + if n.APIVersion == 0 { + n.APIVersion = defaultAPIVersion } if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -105,9 +105,9 @@ func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 +func (n *NginxPlusAPI) createHTTPClient() (*http.Client, error) { + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -119,7 +119,7 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil @@ -127,6 +127,6 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { func init() { inputs.Add("nginx_plus_api", func() telegraf.Input { - return &NginxPlusApi{} + return &NginxPlusAPI{} }) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 6aaaff2d344c7..81f747d86d825 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -19,19 +19,19 @@ var ( errNotFound = errors.New("not found") ) -func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { +func (n *NginxPlusAPI) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { addError(acc, n.gatherProcessesMetrics(addr, acc)) addError(acc, n.gatherConnectionsMetrics(addr, acc)) addError(acc, n.gatherSslMetrics(addr, acc)) - addError(acc, n.gatherHttpRequestsMetrics(addr, acc)) - addError(acc, n.gatherHttpServerZonesMetrics(addr, acc)) - addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc)) - addError(acc, n.gatherHttpCachesMetrics(addr, acc)) + addError(acc, n.gatherHTTPRequestsMetrics(addr, acc)) + addError(acc, n.gatherHTTPServerZonesMetrics(addr, acc)) + addError(acc, n.gatherHTTPUpstreamsMetrics(addr, acc)) + addError(acc, n.gatherHTTPCachesMetrics(addr, acc)) addError(acc, n.gatherStreamServerZonesMetrics(addr, acc)) addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc)) - if n.ApiVersion >= 5 { - addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc)) + if n.APIVersion >= 5 { + addError(acc, 
n.gatherHTTPLocationZonesMetrics(addr, acc)) addError(acc, n.gatherResolverZonesMetrics(addr, acc)) } } @@ -48,12 +48,12 @@ func addError(acc telegraf.Accumulator, err error) { } } -func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { - url := fmt.Sprintf("%s/%d/%s", addr.String(), n.ApiVersion, path) - resp, err := n.client.Get(url) +func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { + address := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) + resp, err := n.client.Get(address) if err != nil { - return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", address, err) } defer resp.Body.Close() @@ -64,25 +64,25 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { // features are either optional, or only available in some versions return nil, errNotFound default: - return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", address, resp.Status) } contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } return body, nil default: - return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + return nil, fmt.Errorf("%s returned unexpected content type %s", address, contentType) } } -func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, processesPath) +func (n *NginxPlusAPI) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, processesPath) if err != nil { return err } @@ -104,8 +104,8 @@ func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumu return nil } -func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, connectionsPath) +func (n *NginxPlusAPI) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, connectionsPath) if err != nil { return err } @@ -130,8 +130,8 @@ func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accu return nil } -func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, sslPath) +func (n *NginxPlusAPI) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, sslPath) if err != nil { return err } @@ -155,13 +155,13 @@ func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) return nil } -func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpRequestsPath) +func (n *NginxPlusAPI) gatherHTTPRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpRequestsPath) if err != nil { return err } - var httpRequests = &HttpRequests{} + var httpRequests = &HTTPRequests{} if err := json.Unmarshal(body, httpRequests); err != nil { return err @@ -179,13 +179,13 @@ func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Acc return nil } -func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpServerZonesPath) 
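// Illustrative sketch, not code from this change set: every gather*Metrics
// function in this file follows the same pattern: fetch /api/<APIVersion>/<path>
// via gatherURL, unmarshal the JSON body into a typed struct, then emit one
// set of fields per zone. A hypothetical endpoint named "example" would read:
//
//	func (n *NginxPlusAPI) gatherExampleMetrics(addr *url.URL, acc telegraf.Accumulator) error {
//		body, err := n.gatherURL(addr, "example") // GET <addr>/<APIVersion>/example
//		if err != nil {
//			return err
//		}
//		var zones map[string]struct {
//			Requests int64 `json:"requests"`
//		}
//		if err := json.Unmarshal(body, &zones); err != nil {
//			return err
//		}
//		for name, zone := range zones {
//			acc.AddFields("nginx_plus_api_example",
//				map[string]interface{}{"requests": zone.Requests},
//				map[string]string{"zone": name})
//		}
//		return nil
//	}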
+func (n *NginxPlusAPI) gatherHTTPServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpServerZonesPath) if err != nil { return err } - var httpServerZones HttpServerZones + var httpServerZones HTTPServerZones if err := json.Unmarshal(body, &httpServerZones); err != nil { return err @@ -227,13 +227,13 @@ func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf. } // Added in 5 API version -func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpLocationZonesPath) +func (n *NginxPlusAPI) gatherHTTPLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpLocationZonesPath) if err != nil { return err } - var httpLocationZones HttpLocationZones + var httpLocationZones HTTPLocationZones if err := json.Unmarshal(body, &httpLocationZones); err != nil { return err @@ -273,13 +273,13 @@ func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegra return nil } -func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpUpstreamsPath) +func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpUpstreamsPath) if err != nil { return err } - var httpUpstreams HttpUpstreams + var httpUpstreams HTTPUpstreams if err := json.Unmarshal(body, &httpUpstreams); err != nil { return err @@ -357,13 +357,13 @@ func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Ac return nil } -func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpCachesPath) +func (n *NginxPlusAPI) gatherHTTPCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpCachesPath) if err != nil { return err } - var httpCaches HttpCaches + var httpCaches HTTPCaches if err := json.Unmarshal(body, &httpCaches); err != nil { return err @@ -411,8 +411,8 @@ func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accum return nil } -func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, streamServerZonesPath) +func (n *NginxPlusAPI) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, streamServerZonesPath) if err != nil { return err } @@ -447,8 +447,8 @@ func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra } // Added in 5 API version -func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, resolverZonesPath) +func (n *NginxPlusAPI) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, resolverZonesPath) if err != nil { return err } @@ -490,8 +490,8 @@ func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Ac return nil } -func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, streamUpstreamsPath) +func (n *NginxPlusAPI) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, streamUpstreamsPath) if err != nil { return err } diff --git 
a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index f309886cff58e..8f28772537288 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -520,7 +520,7 @@ const streamServerZonesPayload = ` ` func TestGatherProcessesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload) + ts, n := prepareEndpoint(t, processesPath, processesPayload) defer ts.Close() var acc testutil.Accumulator @@ -541,7 +541,7 @@ func TestGatherProcessesMetrics(t *testing.T) { } func TestGatherConnectionsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload) + ts, n := prepareEndpoint(t, connectionsPath, connectionsPayload) defer ts.Close() var acc testutil.Accumulator @@ -565,7 +565,7 @@ func TestGatherConnectionsMetrics(t *testing.T) { } func TestGatherSslMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload) + ts, n := prepareEndpoint(t, sslPath, sslPayload) defer ts.Close() var acc testutil.Accumulator @@ -588,13 +588,13 @@ func TestGatherSslMetrics(t *testing.T) { } func TestGatherHttpRequestsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload) + ts, n := prepareEndpoint(t, httpRequestsPath, httpRequestsPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPRequestsMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -610,13 +610,13 @@ func TestGatherHttpRequestsMetrics(t *testing.T) { } func TestGatherHttpServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) + ts, n := prepareEndpoint(t, httpServerZonesPath, httpServerZonesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPServerZonesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -664,13 +664,13 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) { } func TestGatherHttpLocationZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload) + ts, n := prepareEndpoint(t, httpLocationZonesPath, httpLocationZonesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPLocationZonesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -716,13 +716,13 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) { } func TestGatherHttpUpstreamsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) + ts, n := prepareEndpoint(t, httpUpstreamsPath, httpUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPUpstreamsMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -888,13 +888,13 @@ func TestGatherHttpUpstreamsMetrics(t *testing.T) { } func TestGatherHttpCachesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpCachesPath, 
defaultApiVersion, httpCachesPayload) + ts, n := prepareEndpoint(t, httpCachesPath, httpCachesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPCachesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -966,7 +966,7 @@ func TestGatherHttpCachesMetrics(t *testing.T) { } func TestGatherResolverZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload) + ts, n := prepareEndpoint(t, resolverZonesPath, resolverZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1020,7 +1020,7 @@ func TestGatherResolverZonesMetrics(t *testing.T) { } func TestGatherStreamUpstreams(t *testing.T) { - ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) + ts, n := prepareEndpoint(t, streamUpstreamsPath, streamUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator @@ -1159,11 +1159,10 @@ func TestGatherStreamUpstreams(t *testing.T) { "upstream_address": "10.0.0.1:12348", "id": "1", }) - } func TestGatherStreamServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) + ts, n := prepareEndpoint(t, streamServerZonesPath, streamServerZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1208,14 +1207,12 @@ func TestUnavailableEndpoints(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1228,14 +1225,12 @@ func TestServerError(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1245,18 +1240,17 @@ func TestServerError(t *testing.T) { func TestMalformedJSON(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintln(w, "this is not JSON") + _, err := fmt.Fprintln(w, "this is not JSON") + require.NoError(t, err) })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1269,14 +1263,12 @@ func TestUnknownContentType(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1286,9 +1278,7 @@ func TestUnknownContentType(t *testing.T) { func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { t.Helper() addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) @@ -1306,29 +1296,23 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { return addr, host, port } -func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) { +func prepareEndpoint(t *testing.T, path 
string, payload string) (*httptest.Server, *NginxPlusAPI) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - - if r.URL.Path == fmt.Sprintf("/api/%d/%s", apiVersion, path) { - rsp = payload - w.Header()["Content-Type"] = []string{"application/json"} - } else { - t.Errorf("unknown request path") - } + require.Equal(t, r.URL.Path, fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path), "unknown request path") - fmt.Fprintln(w, rsp) + w.Header()["Content-Type"] = []string{"application/json"} + _, err := fmt.Fprintln(w, payload) + require.NoError(t, err) })) - n := &NginxPlusApi{ + n := &NginxPlusAPI{ Urls: []string{fmt.Sprintf("%s/api", ts.URL)}, - ApiVersion: apiVersion, + APIVersion: defaultAPIVersion, } - client, err := n.createHttpClient() - if err != nil { - t.Fatal(err) - } + client, err := n.createHTTPClient() + require.NoError(t, err) + n.client = client return ts, n diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go index 868bc04e445eb..51ada5fd9f46f 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go @@ -35,7 +35,7 @@ type ResolverZones map[string]struct { } `json:"responses"` } -type HttpRequests struct { +type HTTPRequests struct { Total int64 `json:"total"` Current int64 `json:"current"` } @@ -49,7 +49,7 @@ type ResponseStats struct { Total int64 `json:"total"` } -type HttpServerZones map[string]struct { +type HTTPServerZones map[string]struct { Processing int `json:"processing"` Requests int64 `json:"requests"` Responses ResponseStats `json:"responses"` @@ -58,7 +58,7 @@ type HttpServerZones map[string]struct { Sent int64 `json:"sent"` } -type HttpLocationZones map[string]struct { +type HTTPLocationZones map[string]struct { Requests int64 `json:"requests"` Responses ResponseStats `json:"responses"` Discarded *int64 `json:"discarded"` // added in version 6 @@ -73,7 +73,7 @@ type HealthCheckStats struct { LastPassed *bool `json:"last_passed"` } -type HttpUpstreams map[string]struct { +type HTTPUpstreams map[string]struct { Peers []struct { ID *int `json:"id"` // added in version 3 Server string `json:"server"` @@ -145,7 +145,7 @@ type ExtendedHitStats struct { BytesWritten int64 `json:"bytes_written"` } -type HttpCaches map[string]struct { // added in version 2 +type HTTPCaches map[string]struct { // added in version 2 Size int64 `json:"size"` MaxSize int64 `json:"max_size"` Cold bool `json:"cold"` diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go index 046460069c65d..d3e9118577f6f 100644 --- a/plugins/inputs/nginx_sts/nginx_sts.go +++ b/plugins/inputs/nginx_sts/nginx_sts.go @@ -12,14 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxSTS struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -81,8 +81,8 @@ func (n *NginxSTS) Gather(acc telegraf.Accumulator) error { } func (n *NginxSTS) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < 
config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -94,7 +94,7 @@ func (n *NginxSTS) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_sts/nginx_sts_test.go b/plugins/inputs/nginx_sts/nginx_sts_test.go index 18081eadf7f43..9ebb5f91ad9d8 100644 --- a/plugins/inputs/nginx_sts/nginx_sts_test.go +++ b/plugins/inputs/nginx_sts/nginx_sts_test.go @@ -166,14 +166,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -184,13 +183,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator err := n.Gather(&acc) - require.NoError(t, err) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 0fe2907c9a08a..8ad8cc91e8a9e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -4,14 +4,13 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -55,7 +54,7 @@ type NginxUpstreamCheck struct { Method string `toml:"method"` Headers map[string]string `toml:"headers"` HostHeader string `toml:"host_header"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -67,7 +66,7 @@ func NewNginxUpstreamCheck() *NginxUpstreamCheck { Method: "GET", Headers: make(map[string]string), HostHeader: "", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -104,8 +103,8 @@ type NginxUpstreamCheckServer struct { Port uint16 `json:"port"` } -// createHttpClient create a clients to access API -func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { +// createHTTPClient creates a client to access the API +func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { tlsConfig, err := check.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -115,15 +114,14 @@ func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: check.Timeout.Duration, + Timeout: time.Duration(check.Timeout), } return client, nil } -// gatherJsonData query the data source and parse the response JSON -func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) error { - +// gatherJSONData
queries the data source and parses the response JSON +func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{}) error { var method string if check.Method != "" { method = check.Method @@ -131,7 +129,7 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e method = "GET" } - request, err := http.NewRequest(method, url, nil) + request, err := http.NewRequest(method, address, nil) if err != nil { return err } @@ -154,8 +152,8 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -168,7 +166,7 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { if check.client == nil { - client, err := check.createHttpClient() + client, err := check.createHTTPClient() if err != nil { return err @@ -187,25 +185,23 @@ } return nil - } -func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { +func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { checkData := &NginxUpstreamCheckData{} - err := check.gatherJsonData(url, checkData) + err := check.gatherJSONData(address, checkData) if err != nil { return err } for _, server := range checkData.Servers.Server { - tags := map[string]string{ "upstream": server.Upstream, "type": server.Type, "name": server.Name, "port": strconv.Itoa(int(server.Port)), - "url": url, + "url": address, } fields := map[string]interface{}{ diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index 1b70770d01075..353619b362228 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -45,14 +45,13 @@ func TestNginxUpstreamCheckData(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - if request.URL.Path == "/status" { - response = sampleStatusResponse - responseWriter.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } - - fmt.Fprintln(responseWriter, response) + require.Equal(test, request.URL.Path, "/status", "Cannot handle request") + + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(responseWriter, response) + require.NoError(test, err) })) defer testServer.Close() @@ -103,20 +102,18 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - if request.URL.Path == "/status" { - response = sampleStatusResponse - responseWriter.Header()["Content-Type"] =
[]string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(test, request.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(responseWriter, response) + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(responseWriter, response) + require.NoError(test, err) require.Equal(test, request.Method, "POST") require.Equal(test, request.Header.Get("X-Test"), "test-value") require.Equal(test, request.Header.Get("Authorization"), "Basic dXNlcjpwYXNzd29yZA==") require.Equal(test, request.Host, "status.local") - })) defer testServer.Close() diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index 57453c0b4e3b0..bca7c62db9b83 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -12,14 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxVTS struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -81,8 +81,8 @@ func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { } func (n *NginxVTS) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -94,7 +94,7 @@ func (n *NginxVTS) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_vts/nginx_vts_test.go b/plugins/inputs/nginx_vts/nginx_vts_test.go index 085fc38433dff..589bc634f9358 100644 --- a/plugins/inputs/nginx_vts/nginx_vts_test.go +++ b/plugins/inputs/nginx_vts/nginx_vts_test.go @@ -203,14 +203,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -221,13 +220,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator err := n.Gather(&acc) - require.NoError(t, err) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index 3c5d2695dcb33..6c8998129cf90 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -11,27 +11,26 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" 
"github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) // NSD is used to store configuration values type NSD struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool Server string ConfigFile string - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/nsd-control" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## Address of server to connect to, optionally ':port'. Defaults to the @@ -62,32 +61,32 @@ func (s *NSD) SampleConfig() string { } // Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server string, configFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if server != "" { + host, port, err := net.SplitHostPort(server) if err == nil { - Server = host + "@" + port + server = host + "@" + port } - cmdArgs = append([]string{"-s", Server}, cmdArgs...) + cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) } cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) 
} var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running nsd-control: %s (%s %v)", err, cmdName, cmdArgs) } @@ -120,7 +119,7 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { fieldValue, err := strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v", + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) continue } @@ -128,14 +127,14 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { if strings.HasPrefix(stat, "server") { statTokens := strings.Split(stat, ".") if len(statTokens) > 1 { - serverId := strings.TrimPrefix(statTokens[0], "server") - if _, err := strconv.Atoi(serverId); err == nil { + serverID := strings.TrimPrefix(statTokens[0], "server") + if _, err := strconv.Atoi(serverID); err == nil { serverTokens := statTokens[1:] field := strings.Join(serverTokens[:], "_") - if fieldsServers[serverId] == nil { - fieldsServers[serverId] = make(map[string]interface{}) + if fieldsServers[serverID] == nil { + fieldsServers[serverID] = make(map[string]interface{}) } - fieldsServers[serverId][field] = fieldValue + fieldsServers[serverID][field] = fieldValue } } } else { @@ -145,8 +144,8 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { } acc.AddFields("nsd", fields, nil) - for thisServerId, thisServerFields := range fieldsServers { - thisServerTag := map[string]string{"server": thisServerId} + for thisServerID, thisServerFields := range fieldsServers { + thisServerTag := map[string]string{"server": thisServerID} acc.AddFields("nsd_servers", thisServerFields, thisServerTag) } diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index ee527f7b7f0b2..74f4a14cf96fa 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,17 +3,15 @@ package nsd import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server string, ConfigFile string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { +func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { + return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,21 +19,20 @@ func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server s func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &NSD{ - run: NSDControl(fullOutput, TestTimeout, true, "", ""), + run: NSDControl(fullOutput), } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("nsd")) - assert.True(t, acc.HasMeasurement("nsd_servers")) + require.True(t, acc.HasMeasurement("nsd")) + require.True(t, acc.HasMeasurement("nsd_servers")) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, 99, acc.NFields()) + require.Len(t, acc.Metrics, 2) + require.Equal(t, 99, acc.NFields()) acc.AssertContainsFields(t, "nsd", parsedFullOutput) 
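// Illustrative mapping per the Gather logic in nsd.go, using a hypothetical
// stat line "server0.queries=42": the "server0" prefix is stripped to the
// server index, so the value surfaces as measurement "nsd_servers" with tag
// server=0 and field queries=42, while stats without a serverN prefix are
// accumulated into the plain "nsd" measurement asserted above.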
acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag) - } var parsedFullOutputServerAsTag = map[string]interface{}{ diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index fe941982646b1..58f60192b96d0 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -25,7 +25,7 @@ package nsq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -82,7 +82,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error { var err error if n.httpClient == nil { - n.httpClient, err = n.getHttpClient() + n.httpClient, err = n.getHTTPClient() if err != nil { return err } @@ -101,7 +101,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NSQ) getHttpClient() (*http.Client, error) { +func (n *NSQ) getHTTPClient() (*http.Client, error) { tlsConfig, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -111,7 +111,7 @@ func (n *NSQ) getHttpClient() (*http.Client, error) { } httpClient := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return httpClient, nil } @@ -123,7 +123,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { } r, err := n.httpClient.Get(u.String()) if err != nil { - return fmt.Errorf("Error while polling %s: %s", u.String(), err) + return fmt.Errorf("error while polling %s: %s", u.String(), err) } defer r.Body.Close() @@ -131,22 +131,22 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { - return fmt.Errorf(`Error reading body: %s`, err) + return fmt.Errorf(`error reading body: %s`, err) } data := &NSQStatsData{} err = json.Unmarshal(body, data) if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return fmt.Errorf(`error parsing response: %s`, err) } // Data was not parsed correctly attempt to use old format. 
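	// Older nsqd releases wrapped the stats object in an envelope;
	// NSQStats models that wrapper, with the payload under its Data field.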
if len(data.Version) < 1 { wrapper := &NSQStats{} err = json.Unmarshal(body, wrapper) if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return fmt.Errorf(`error parsing response: %s`, err) } data = &wrapper.Data } @@ -176,7 +176,7 @@ func buildURL(e string) (*url.URL, error) { u := fmt.Sprintf(requestPattern, e) addr, err := url.Parse(u) if err != nil { - return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) + return nil, fmt.Errorf("unable to parse address '%s': %s", u, err) } return addr, nil } diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index 23af13a4c82bc..03ebeaed65382 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -15,7 +15,8 @@ import ( func TestNSQStatsV1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responseV1) + _, err := fmt.Fprintln(w, responseV1) + require.NoError(t, err) })) defer ts.Close() @@ -271,7 +272,8 @@ var responseV1 = ` func TestNSQStatsPreV1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responsePreV1) + _, err := fmt.Fprintln(w, responsePreV1) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 2c25cce7d8114..34360472ab0b9 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -2,6 +2,7 @@ package nsq_consumer import ( "context" + "fmt" "sync" "github.com/influxdata/telegraf" @@ -21,7 +22,7 @@ type logger struct { log telegraf.Logger } -func (l *logger) Output(calldepth int, s string) error { +func (l *logger) Output(_ int, s string) error { l.log.Debug(s) return nil } @@ -102,7 +103,9 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { ctx, cancel := context.WithCancel(context.Background()) n.cancel = cancel - n.connect() + if err := n.connect(); err != nil { + return err + } n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) @@ -132,10 +135,29 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil })) + // For backward compatibility + if n.Server != "" { + n.Nsqd = append(n.Nsqd, n.Server) + } + + // Check if we have anything to connect to + if len(n.Nsqlookupd) == 0 && len(n.Nsqd) == 0 { + return fmt.Errorf("either 'nsqd' or 'nsqlookupd' needs to be specified") + } + if len(n.Nsqlookupd) > 0 { - n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) + err := n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) + if err != nil && err != nsq.ErrAlreadyConnected { + return err + } + } + + if len(n.Nsqd) > 0 { + err := n.consumer.ConnectToNSQDs(n.Nsqd) + if err != nil && err != nsq.ErrAlreadyConnected { + return err + } } - n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) n.wg.Add(1) go func() { @@ -179,7 +201,7 @@ func (n *NSQConsumer) Stop() { } // Gather is a noop -func (n *NSQConsumer) Gather(acc telegraf.Accumulator) error { +func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index e07b125ccdb8f..4c6d944746440 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ 
b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -11,10 +11,11 @@ import ( "testing" "time" + "github.com/nsqio/go-nsq" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/nsqio/go-nsq" - "github.com/stretchr/testify/assert" ) // This test is modeled after the kafka consumer integration test @@ -22,18 +23,21 @@ func TestReadsMetricsFromNSQ(t *testing.T) { msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n")) + frameMsg, err := frameMessage(msg) + require.NoError(t, err) + script := []instruction{ // SUB {0, nsq.FrameTypeResponse, []byte("OK")}, // IDENTIFY {0, nsq.FrameTypeResponse, []byte("OK")}, - {20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)}, + {20 * time.Millisecond, nsq.FrameTypeMessage, frameMsg}, // needed to exit test {100 * time.Millisecond, -1, []byte("exit")}, } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155") - newMockNSQD(script, addr.String()) + newMockNSQD(t, script, addr.String()) consumer := &NSQConsumer{ Log: testutil.Logger{}, @@ -48,27 +52,22 @@ func TestReadsMetricsFromNSQ(t *testing.T) { p, _ := parsers.NewInfluxParser() consumer.SetParser(p) var acc testutil.Accumulator - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") - if err := consumer.Start(&acc); err != nil { - t.Fatal(err.Error()) - } + require.Len(t, acc.Metrics, 0, "There should not be any points") + require.NoError(t, consumer.Start(&acc)) waitForPoint(&acc, t) - if len(acc.Metrics) == 1 { - point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ - "host": "server01", - "direction": "in", - "region": "us-west", - }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) - } else { - t.Errorf("No points found in accumulator, expected 1") - } - + require.Len(t, acc.Metrics, 1, "No points found in accumulator, expected 1") + + point := acc.Metrics[0] + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ + "host": "server01", + "direction": "in", + "region": "us-west", + }, point.Tags) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } // Waits for the metric that was sent to the kafka broker to arrive at the kafka @@ -78,6 +77,8 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { ticker := time.NewTicker(5 * time.Millisecond) defer ticker.Stop() counter := 0 + + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: @@ -91,16 +92,15 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { } } -func newMockNSQD(script []instruction, addr string) *mockNSQD { +func newMockNSQD(t *testing.T, script []instruction, addr string) *mockNSQD { n := &mockNSQD{ script: script, exitChan: make(chan int), } tcpListener, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) - } + require.NoError(t, err, "listen (%s) failed", n.tcpAddr.String()) + n.tcpListener = tcpListener n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) @@ -141,6 +141,7 @@ func (n *mockNSQD) handle(conn net.Conn) { 
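	// An NSQ client opens every connection with a 4-byte protocol version,
	// which the mock reads up front and discards.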
buf := make([]byte, 4) _, err := io.ReadFull(conn, buf) if err != nil { + //nolint:revive // log.Fatalf called intentionally log.Fatalf("ERROR: failed to read protocol version - %s", err) } @@ -173,14 +174,14 @@ func (n *mockNSQD) handle(conn net.Conn) { l := make([]byte, 4) _, err := io.ReadFull(rdr, l) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } size := int32(binary.BigEndian.Uint32(l)) b := make([]byte, size) _, err = io.ReadFull(rdr, b) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } case bytes.Equal(params[0], []byte("RDY")): @@ -202,9 +203,14 @@ func (n *mockNSQD) handle(conn net.Conn) { } rdyCount-- } - _, err := conn.Write(framedResponse(inst.frameType, inst.body)) + buf, err := framedResponse(inst.frameType, inst.body) + if err != nil { + log.Print(err.Error()) + goto exit + } + _, err = conn.Write(buf) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } scriptTime = time.After(n.script[idx+1].delay) @@ -213,11 +219,14 @@ func (n *mockNSQD) handle(conn net.Conn) { } exit: + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive n.tcpListener.Close() + //nolint:errcheck,revive conn.Close() } -func framedResponse(frameType int32, data []byte) []byte { +func framedResponse(frameType int32, data []byte) ([]byte, error) { var w bytes.Buffer beBuf := make([]byte, 4) @@ -226,21 +235,21 @@ func framedResponse(frameType int32, data []byte) []byte { binary.BigEndian.PutUint32(beBuf, size) _, err := w.Write(beBuf) if err != nil { - return nil + return nil, err } binary.BigEndian.PutUint32(beBuf, uint32(frameType)) _, err = w.Write(beBuf) if err != nil { - return nil + return nil, err } - w.Write(data) - return w.Bytes() + _, err = w.Write(data) + return w.Bytes(), err } -func frameMessage(m *nsq.Message) []byte { +func frameMessage(m *nsq.Message) ([]byte, error) { var b bytes.Buffer - m.WriteTo(&b) - return b.Bytes() + _, err := m.WriteTo(&b) + return b.Bytes(), err } diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index e6dcb420f30ce..b5ada855479c9 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -2,7 +2,6 @@ package nstat import ( "bytes" - "io/ioutil" "os" "strconv" @@ -18,18 +17,18 @@ var ( // default file paths const ( - NET_NETSTAT = "/net/netstat" - NET_SNMP = "/net/snmp" - NET_SNMP6 = "/net/snmp6" - NET_PROC = "/proc" + NetNetstat = "/net/netstat" + NetSnmp = "/net/snmp" + NetSnmp6 = "/net/snmp6" + NetProc = "/proc" ) // env variable names const ( - ENV_NETSTAT = "PROC_NET_NETSTAT" - ENV_SNMP = "PROC_NET_SNMP" - ENV_SNMP6 = "PROC_NET_SNMP6" - ENV_ROOT = "PROC_ROOT" + EnvNetstat = "PROC_NET_NETSTAT" + EnvSnmp = "PROC_NET_SNMP" + EnvSnmp6 = "PROC_NET_SNMP6" + EnvRoot = "PROC_ROOT" ) type Nstat struct { @@ -62,93 +61,72 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { // load paths, get from env if config values are empty ns.loadPaths() - netstat, err := ioutil.ReadFile(ns.ProcNetNetstat) + netstat, err := os.ReadFile(ns.ProcNetNetstat) if err != nil { return err } // collect netstat data - err = ns.gatherNetstat(netstat, acc) - if err != nil { - return err - } + ns.gatherNetstat(netstat, acc) // collect SNMP data - snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) - if err != nil { - return err - } - err = ns.gatherSNMP(snmp, acc) + snmp, err := os.ReadFile(ns.ProcNetSNMP) if err != nil { return err } + ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 
enabled) - snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) + snmp6, err := os.ReadFile(ns.ProcNetSNMP6) if err == nil { - err = ns.gatherSNMP6(snmp6, acc) - if err != nil { - return err - } + ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { return err } return nil } -func (ns *Nstat) gatherNetstat(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadUglyTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherNetstat(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadUglyTable(data) tags := map[string]string{ "name": "netstat", } acc.AddFields("nstat", metrics, tags) - return nil } -func (ns *Nstat) gatherSNMP(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadUglyTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherSNMP(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadUglyTable(data) tags := map[string]string{ "name": "snmp", } acc.AddFields("nstat", metrics, tags) - return nil } -func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadGoodTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadGoodTable(data) tags := map[string]string{ "name": "snmp6", } acc.AddFields("nstat", metrics, tags) - return nil } // loadPaths can be used to read paths firstly from config // if it is empty then try read from env variables func (ns *Nstat) loadPaths() { if ns.ProcNetNetstat == "" { - ns.ProcNetNetstat = proc(ENV_NETSTAT, NET_NETSTAT) + ns.ProcNetNetstat = proc(EnvNetstat, NetNetstat) } if ns.ProcNetSNMP == "" { - ns.ProcNetSNMP = proc(ENV_SNMP, NET_SNMP) + ns.ProcNetSNMP = proc(EnvSnmp, NetSnmp) } if ns.ProcNetSNMP6 == "" { - ns.ProcNetSNMP6 = proc(ENV_SNMP6, NET_SNMP6) + ns.ProcNetSNMP6 = proc(EnvSnmp6, NetSnmp6) } } // loadGoodTable can be used to parse string heap that // headers and values are arranged in right order -func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { +func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} { entries := map[string]interface{}{} fields := bytes.Fields(table) var value int64 @@ -158,12 +136,12 @@ func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) for i := 0; i < len(fields); i = i + 2 { // counter is zero if bytes.Equal(fields[i+1], zeroByte) { - if !dumpZeros { - continue - } else { - entries[string(fields[i])] = int64(0) + if !ns.DumpZeros { continue } + + entries[string(fields[i])] = int64(0) + continue } // the counter is not zero, so parse it. 
value, err = strconv.ParseInt(string(fields[i+1]), 10, 64) @@ -171,12 +149,12 @@ func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) entries[string(fields[i])] = value } } - return entries, nil + return entries } // loadUglyTable can be used to parse string heap that // the headers and values are splitted with a newline -func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { +func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} { entries := map[string]interface{}{} // split the lines by newline lines := bytes.Split(table, newLineByte) @@ -196,12 +174,12 @@ func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) for j := 1; j < len(headers); j++ { // counter is zero if bytes.Equal(metrics[j], zeroByte) { - if !dumpZeros { - continue - } else { - entries[string(append(prefix, headers[j]...))] = int64(0) + if !ns.DumpZeros { continue } + + entries[string(append(prefix, headers[j]...))] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(metrics[j]), 10, 64) @@ -210,7 +188,7 @@ func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) } } } - return entries, nil + return entries } // proc can be used to read file paths from env @@ -220,9 +198,9 @@ func proc(env, path string) string { return p } // try to read root path, or use default root path - root := os.Getenv(ENV_ROOT) + root := os.Getenv(EnvRoot) if root == "" { - root = NET_PROC + root = NetProc } return root + path } diff --git a/plugins/inputs/nstat/nstat_test.go b/plugins/inputs/nstat/nstat_test.go index 7f4c09ce4d4be..95b64777b08af 100644 --- a/plugins/inputs/nstat/nstat_test.go +++ b/plugins/inputs/nstat/nstat_test.go @@ -12,11 +12,8 @@ func TestLoadUglyTable(t *testing.T) { "IpExtInCEPkts": int64(2660494435), } - got, err := loadUglyTable([]byte(uglyStr), true) - if err != nil { - t.Fatal(err) - } - + n := Nstat{DumpZeros: true} + got := n.loadUglyTable([]byte(uglyStr)) if len(got) == 0 { t.Fatalf("want %+v, got %+v", parsed, got) } @@ -40,10 +37,8 @@ func TestLoadGoodTable(t *testing.T) { "Ip6InDelivers": int64(62), "Ip6InMcastOctets": int64(1242966), } - got, err := loadGoodTable([]byte(goodStr), true) - if err != nil { - t.Fatal(err) - } + n := Nstat{DumpZeros: true} + got := n.loadGoodTable([]byte(goodStr)) if len(got) == 0 { t.Fatalf("want %+v, got %+v", parsed, got) } diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index 80b5dcd0f16be..6b924fc52298a 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -14,7 +14,7 @@ import ( ) // Mapping of ntpq header names to tag keys -var tagHeaders map[string]string = map[string]string{ +var tagHeaders = map[string]string{ "remote": "remote", "refid": "refid", "st": "stratum", @@ -50,7 +50,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // Due to problems with a parsing, we have to use regexp expression in order // to remove string that starts from '(' and ends with space // see: https://github.com/influxdata/telegraf/issues/2386 - reg, err := regexp.Compile("\\s+\\([\\S]*") + reg, err := regexp.Compile(`\s+\([\S]*`) if err != nil { return err } @@ -128,7 +128,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "h"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) if err != nil { - acc.AddError(fmt.Errorf("E! 
Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } // seconds in an hour @@ -137,7 +137,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "d"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } // seconds in a day @@ -146,7 +146,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "m"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } // seconds in a day @@ -157,7 +157,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.Atoi(fields[index]) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } mFields[key] = int64(m) @@ -174,7 +174,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.ParseFloat(fields[index], 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing float: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing float: %s", fields[index])) continue } mFields[key] = m diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index b0db77e45784f..54d4e10e717ac 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -20,7 +20,7 @@ func TestSingleNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -49,7 +49,7 @@ func TestBadIntNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -77,7 +77,7 @@ func TestBadFloatNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(2), @@ -105,7 +105,7 @@ func TestDaysNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(172800), @@ -134,7 +134,7 @@ func TestHoursNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(7200), @@ -163,7 +163,7 @@ func TestMinutesNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(120), @@ -192,7 +192,7 @@ func 
TestBadWhenNTPQ(t *testing.T) {
	n.runQ = tt.runqTest

	acc := testutil.Accumulator{}
-	assert.Error(t, acc.GatherError(n.Gather))
+	require.Error(t, acc.GatherError(n.Gather))

	fields := map[string]interface{}{
		"poll": int64(256),
@@ -222,7 +222,7 @@ func TestParserNTPQ(t *testing.T) {
	n := newNTPQ()
	n.runQ = tt.runqTest
	acc := testutil.Accumulator{}
-	assert.NoError(t, acc.GatherError(n.Gather))
+	require.NoError(t, acc.GatherError(n.Gather))

	fields := map[string]interface{}{
		"poll": int64(64),
@@ -285,7 +285,7 @@ func TestMultiNTPQ(t *testing.T) {
	n.runQ = tt.runqTest

	acc := testutil.Accumulator{}
-	assert.NoError(t, acc.GatherError(n.Gather))
+	require.NoError(t, acc.GatherError(n.Gather))

	fields := map[string]interface{}{
		"delay": float64(54.033),
@@ -329,7 +329,7 @@ func TestBadHeaderNTPQ(t *testing.T) {
	n.runQ = tt.runqTest

	acc := testutil.Accumulator{}
-	assert.NoError(t, acc.GatherError(n.Gather))
+	require.NoError(t, acc.GatherError(n.Gather))

	fields := map[string]interface{}{
		"when": int64(101),
@@ -357,7 +357,7 @@ func TestMissingDelayColumnNTPQ(t *testing.T) {
	n.runQ = tt.runqTest

	acc := testutil.Accumulator{}
-	assert.NoError(t, acc.GatherError(n.Gather))
+	require.NoError(t, acc.GatherError(n.Gather))

	fields := map[string]interface{}{
		"when": int64(101),
@@ -378,13 +378,13 @@ func TestMissingDelayColumnNTPQ(t *testing.T) {
 func TestFailedNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(singleNTPQ),
-		err: fmt.Errorf("Test failure"),
+		err: fmt.Errorf("test failure"),
	}
	n := newNTPQ()
	n.runQ = tt.runqTest

	acc := testutil.Accumulator{}
-	assert.Error(t, acc.GatherError(n.Gather))
+	require.Error(t, acc.GatherError(n.Gather))
}

// It is possible for the output of ntpq to be missing the refid column. This
diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md
index bbe90e005c6d6..479634d7befb0 100644
--- a/plugins/inputs/nvidia_smi/README.md
+++ b/plugins/inputs/nvidia_smi/README.md
@@ -7,13 +7,19 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid
 ```toml
 # Pulls statistics from nvidia GPUs attached to the host
 [[inputs.nvidia_smi]]
-  ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+  ## Optional: path to nvidia-smi binary, defaults to "/usr/bin/nvidia-smi"
+  ## The plugin first looks for the binary at the configured (or default) path.
+  ## If it is not found there, PATH is searched (exec.LookPath); if both lookups fail, an error is returned.
   # bin_path = "/usr/bin/nvidia-smi"

   ## Optional: timeout for GPU polling
   # timeout = "5s"
 ```

+#### Linux
+
+On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi`.
+
 #### Windows

 On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe`
@@ -52,6 +58,8 @@ You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program
   - `clocks_current_sm` (integer, MHz)
   - `clocks_current_memory` (integer, MHz)
   - `clocks_current_video` (integer, MHz)
+  - `driver_version` (string)
+  - `cuda_version` (string)

 ### Sample Query

@@ -87,3 +95,5 @@ nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,psta
 ### Limitations
 Note that there seems to be an issue with getting current memory clock values when the memory is overclocked. This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti.
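For reference, the new `driver_version` and `cuda_version` fields are decoded from top-level elements of the `nvidia-smi -q -x` XML document, next to the per-GPU payload. A minimal standalone sketch of that decoding (the trimmed `smi` struct below is an assumption for illustration; the plugin's real `SMI` type also carries the full GPU data):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"os/exec"
)

// Trimmed-down mirror of the plugin's SMI struct; only the two
// top-level version elements are decoded here.
type smi struct {
	DriverVersion string `xml:"driver_version"`
	CUDAVersion   string `xml:"cuda_version"`
}

func main() {
	// The plugin shells out the same way: `nvidia-smi -q -x` dumps the
	// full device state as a single XML document.
	out, err := exec.Command("/usr/bin/nvidia-smi", "-q", "-x").Output()
	if err != nil {
		log.Fatal(err)
	}

	var s smi
	if err := xml.Unmarshal(out, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println("driver:", s.DriverVersion, "cuda:", s.CUDAVersion)
}
```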
+ +**NOTE:** For use with docker either generate your own custom docker image based on nvidia/cuda which also installs a telegraf package or use [volume mount binding](https://docs.docker.com/storage/bind-mounts/) to inject the required binary into the docker container. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 688c3d4bb7680..68f25ba428611 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -19,7 +20,7 @@ const measurement = "nvidia_smi" // NvidiaSMI holds the methods for this plugin type NvidiaSMI struct { BinPath string - Timeout internal.Duration + Timeout config.Duration } // Description returns the description of the NvidiaSMI plugin @@ -30,7 +31,9 @@ func (smi *NvidiaSMI) Description() string { // SampleConfig returns the sample configuration for the NvidiaSMI plugin func (smi *NvidiaSMI) SampleConfig() string { return ` - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling @@ -38,12 +41,21 @@ func (smi *NvidiaSMI) SampleConfig() string { ` } -// Gather implements the telegraf interface -func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { +func (smi *NvidiaSMI) Init() error { if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { - return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) + binPath, err := exec.LookPath("nvidia-smi") + // fail-fast + if err != nil { + return fmt.Errorf("nvidia-smi not found in %q and not in PATH; please make sure nvidia-smi is installed and/or is in PATH", smi.BinPath) + } + smi.BinPath = binPath } + return nil +} + +// Gather implements the telegraf interface +func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { data, err := smi.pollSMI() if err != nil { return err @@ -61,14 +73,14 @@ func init() { inputs.Add("nvidia_smi", func() telegraf.Input { return &NvidiaSMI{ BinPath: "/usr/bin/nvidia-smi", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } func (smi *NvidiaSMI) pollSMI() ([]byte, error) { // Construct and execute metrics query - ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), smi.Timeout.Duration) + ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), time.Duration(smi.Timeout)) if err != nil { return nil, err } @@ -109,6 +121,8 @@ func (s *SMI) genTagsFields() []metric { setTagIfUsed(tags, "uuid", gpu.UUID) setTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + setIfUsed("str", fields, "driver_version", s.DriverVersion) + setIfUsed("str", fields, "cuda_version", s.CUDAVersion) setIfUsed("int", fields, "fan_speed", gpu.FanSpeed) setIfUsed("int", fields, "memory_total", gpu.Memory.Total) setIfUsed("int", fields, "memory_used", gpu.Memory.Used) @@ -169,12 +183,18 @@ func setIfUsed(t string, m map[string]interface{}, k, v string) { m[k] = i } } + case "str": + if val != 
"" { + m[k] = val + } } } // SMI defines the structure for the output of _nvidia-smi -q -x_. type SMI struct { - GPU GPU `xml:"gpu"` + GPU GPU `xml:"gpu"` + DriverVersion string `xml:"driver_version"` + CUDAVersion string `xml:"cuda_version"` } // GPU defines the structure of the GPU portion of the smi output. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 3c191e609ade4..3c0b14d6e4559 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -69,6 +69,8 @@ func TestGatherValidXML(t *testing.T) { "clocks_current_memory": 405, "clocks_current_sm": 300, "clocks_current_video": 540, + "cuda_version": "10.1", + "driver_version": "418.43", "encoder_stats_average_fps": 0, "encoder_stats_average_latency": 0, "encoder_stats_session_count": 0, @@ -109,6 +111,8 @@ func TestGatherValidXML(t *testing.T) { "clocks_current_memory": 405, "clocks_current_sm": 139, "clocks_current_video": 544, + "cuda_version": "10.1", + "driver_version": "418.43", "encoder_stats_average_fps": 0, "encoder_stats_average_latency": 0, "encoder_stats_session_count": 0, @@ -135,7 +139,7 @@ func TestGatherValidXML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherNvidiaSMI(octets, &acc) diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index 173d98b6fac98..d6530c0839b18 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -9,8 +9,8 @@ Plugin minimum tested version: 1.16 ```toml [[inputs.opcua]] - ## Device name - # name = "localhost" + ## Metric name + # name = "opcua" # ## OPC UA Endpoint URL # endpoint = "opc.tcp://localhost:4840" @@ -47,34 +47,97 @@ Plugin minimum tested version: 1.16 # password = "" # ## Node ID configuration - ## name - the variable name - ## namespace - integer value 0 thru 3 - ## identifier_type - s=string, i=numeric, g=guid, b=opaque - ## identifier - tag as shown in opcua browser - ## data_type - boolean, byte, short, int, uint, uint16, int16, - ## uint32, int32, float, double, string, datetime, number + ## name - field name to use in the output + ## namespace - OPC UA namespace of the node (integer value 0 thru 3) + ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) + ## identifier - OPC UA ID (tag as shown in opcua browser) + ## tags - extra tags to be added to the output metric (optional) ## Example: - ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} - nodes = [ - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - ] + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2]]} + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + # + ## Node Group + ## Sets defaults for OPC UA namespace and ID type so they aren't required in + ## every node. A group can also have a metric name that overrides the main + ## plugin metric name. 
+  ##
+  ## Multiple node groups are allowed
+  #[[inputs.opcua.group]]
+  ## Group Metric name. Overrides the top level name. If unset, the
+  ## top level name is used.
+  # name =
+  #
+  ## Group default namespace. If a node in the group doesn't set its
+  ## namespace, this is used.
+  # namespace =
+  #
+  ## Group default identifier type. If a node in the group doesn't set its
+  ## identifier type, this is used.
+  # identifier_type =
+  #
+  ## Node ID Configuration. Array of nodes with the same settings as above.
+  # nodes = [
+  #  {name="", namespace="", identifier_type="", identifier=""},
+  #  {name="", namespace="", identifier_type="", identifier=""},
+  #]
 ```

-### Example Node Configuration
-An OPC UA node ID may resemble: "n=3,s=Temperature". In this example:
+### Node Configuration
+An OPC UA node ID may resemble: "ns=3;s=Temperature". In this example:
 - ns=3 indicates the `namespace` is 3
 - s=Temperature indicates that the `identifier_type` is a string and the `identifier` value is 'Temperature'
-- This example temperature node has a value of 79.0, which makes the `data_type` a 'float'.
+- This example temperature node has a value of 79.0

 To gather data from this node enter the following line into the 'nodes' property above:
 ```
-{name="LabelName", namespace="3", identifier_type="s", identifier="Temperature", data_type="float", description="Description of node"},
+{name="temp", namespace="3", identifier_type="s", identifier="Temperature"},
+```
+
+This node configuration produces a metric like this:
+```
+opcua,id=ns\=3;s\=Temperature temp=79.0,Quality="OK (0x0)" 1597820490000000000
 ```

+### Group Configuration
+Groups can set default values for the namespace, identifier type, and
+tags. The default values apply to all the nodes in the
+group. If a default is set, a node may omit the setting altogether.
+This simplifies node configuration, especially when many nodes share
+the same namespace or identifier type.

-### Example Output
+The output metric will include tags set in the group and the node. If
+a tag with the same name is set in both places, the tag value from the
+node is used.
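Under the hood the merge is a plain map overlay: group tags are copied first and node tags second, so the node's value wins on a key collision. A minimal sketch of that rule as a standalone helper (the function name is hypothetical; the plugin performs this merge inline while building its node list):

```go
// mergeTags overlays node tags on top of group tags. On a key
// collision the node's value wins, matching the rule described above.
func mergeTags(group, node map[string]string) map[string]string {
	merged := make(map[string]string, len(group)+len(node))
	for k, v := range group {
		merged[k] = v
	}
	for k, v := range node {
		merged[k] = v
	}
	return merged
}
```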
+This example group configuration has two groups with two nodes each: +``` + [[inputs.opcua.group]] + name="group1_metric_name" + namespace="3" + identifier_type="i" + tags=[["group1_tag", "val1"]] + nodes = [ + {name="name", identifier="1001", tags=[["node1_tag", "val2"]]}, + {name="name", identifier="1002", tags=[["node1_tag", "val3"]]}, + ] + [[inputs.opcua.group]] + name="group2_metric_name" + namespace="3" + identifier_type="i" + tags=[["group2_tag", "val3"]] + nodes = [ + {name="saw", identifier="1003", tags=[["node2_tag", "val4"]]}, + {name="sin", identifier="1004"}, + ] ``` -opcua,host=3c70aee0901e,name=Random,type=double Random=0.018158170305814902 1597820490000000000 +It produces metrics like these: +``` +group1_metric_name,group1_tag=val1,id=ns\=3;i\=1001,node1_tag=val2 name=0,Quality="OK (0x0)" 1606893246000000000 +group1_metric_name,group1_tag=val1,id=ns\=3;i\=1002,node1_tag=val3 name=-1.389117,Quality="OK (0x0)" 1606893246000000000 +group2_metric_name,group2_tag=val3,id=ns\=3;i\=1003,node2_tag=val4 Quality="OK (0x0)",saw=-1.6 1606893246000000000 +group2_metric_name,group2_tag=val3,id=ns\=3;i\=1004 sin=1.902113,Quality="OK (0x0)" 1606893246000000000 ``` diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 87647e2b9d5f8..213dbd615a939 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -3,21 +3,23 @@ package opcua_client import ( "context" "fmt" - "log" "net/url" + "sort" "strings" "time" "github.com/gopcua/opcua" "github.com/gopcua/opcua/ua" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/selfstat" ) // OpcUA type type OpcUA struct { - Name string `toml:"name"` + MetricName string `toml:"name"` Endpoint string `toml:"endpoint"` SecurityPolicy string `toml:"security_policy"` SecurityMode string `toml:"security_mode"` @@ -28,18 +30,19 @@ type OpcUA struct { AuthMethod string `toml:"auth_method"` ConnectTimeout config.Duration `toml:"connect_timeout"` RequestTimeout config.Duration `toml:"request_timeout"` - NodeList []OPCTag `toml:"nodes"` - - Nodes []string `toml:"-"` - NodeData []OPCData `toml:"-"` - NodeIDs []*ua.NodeID `toml:"-"` - NodeIDerror []error `toml:"-"` + RootNodes []NodeSettings `toml:"nodes"` + Groups []GroupSettings `toml:"group"` + Log telegraf.Logger `toml:"-"` + + nodes []Node + nodeData []OPCData + nodeIDs []*ua.NodeID + nodeIDerror []error state ConnectionState // status - ReadSuccess int `toml:"-"` - ReadError int `toml:"-"` - NumberOfTags int `toml:"-"` + ReadSuccess selfstat.Stat `toml:"-"` + ReadError selfstat.Stat `toml:"-"` // internal values client *opcua.Client @@ -48,13 +51,29 @@ type OpcUA struct { } // OPCTag type -type OPCTag struct { - Name string `toml:"name"` - Namespace string `toml:"namespace"` - IdentifierType string `toml:"identifier_type"` - Identifier string `toml:"identifier"` - DataType string `toml:"data_type"` - Description string `toml:"description"` +type NodeSettings struct { + FieldName string `toml:"name"` + Namespace string `toml:"namespace"` + IdentifierType string `toml:"identifier_type"` + Identifier string `toml:"identifier"` + DataType string `toml:"data_type"` // Kept for backward compatibility but was never used. + Description string `toml:"description"` // Kept for backward compatibility but was never used. 
+ TagsSlice [][]string `toml:"tags"` +} + +type Node struct { + tag NodeSettings + idStr string + metricName string + metricTags map[string]string +} + +type GroupSettings struct { + MetricName string `toml:"name"` // Overrides plugin's setting + Namespace string `toml:"namespace"` // Can be overridden by node setting + IdentifierType string `toml:"identifier_type"` // Can be overridden by node setting + Nodes []NodeSettings `toml:"nodes"` + TagsSlice [][]string `toml:"tags"` } // OPCData type @@ -81,9 +100,8 @@ const ( const description = `Retrieve data from OPCUA devices` const sampleConfig = ` -[[inputs.opcua]] - ## Device name - # name = "localhost" + ## Metric name + # name = "opcua" # ## OPC UA Endpoint URL # endpoint = "opc.tcp://localhost:4840" @@ -120,18 +138,41 @@ const sampleConfig = ` # password = "" # ## Node ID configuration - ## name - the variable name - ## namespace - integer value 0 thru 3 - ## identifier_type - s=string, i=numeric, g=guid, b=opaque - ## identifier - tag as shown in opcua browser - ## data_type - boolean, byte, short, int, uint, uint16, int16, - ## uint32, int32, float, double, string, datetime, number + ## name - field name to use in the output + ## namespace - OPC UA namespace of the node (integer value 0 thru 3) + ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) + ## identifier - OPC UA ID (tag as shown in opcua browser) ## Example: - ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} - nodes = [ - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - ] + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + # + ## Node Group + ## Sets defaults for OPC UA namespace and ID type so they aren't required in + ## every node. A group can also have a metric name that overrides the main + ## plugin metric name. + ## + ## Multiple node groups are allowed + #[[inputs.opcua.group]] + ## Group Metric name. Overrides the top level name. If unset, the + ## top level name is used. + # name = + # + ## Group default namespace. If a node in the group doesn't set its + ## namespace, this is used. + # namespace = + # + ## Group default identifier type. If a node in the group doesn't set its + ## namespace, this is used. + # identifier_type = + # + ## Node ID Configuration. Array of nodes with the same settings as above. 
+ # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] ` // Description will appear directly above the plugin definition in the config file @@ -157,16 +198,23 @@ func (o *OpcUA) Init() error { if err != nil { return err } - o.NumberOfTags = len(o.NodeList) - o.setupOptions() + err = o.setupOptions() + if err != nil { + return err + } - return nil + tags := map[string]string{ + "endpoint": o.Endpoint, + } + o.ReadError = selfstat.Register("opcua", "read_error", tags) + o.ReadSuccess = selfstat.Register("opcua", "read_success", tags) + return nil } func (o *OpcUA) validateEndpoint() error { - if o.Name == "" { + if o.MetricName == "" { return fmt.Errorf("device name is empty") } @@ -182,24 +230,81 @@ func (o *OpcUA) validateEndpoint() error { //search security policy type switch o.SecurityPolicy { case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": - break + // Valid security policy type - do nothing. default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.Name) + return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.MetricName) } //search security mode type switch o.SecurityMode { case "None", "Sign", "SignAndEncrypt", "auto": - break + // Valid security mode type - do nothing. default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.Name) + return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.MetricName) } return nil } +func tagsSliceToMap(tags [][]string) (map[string]string, error) { + m := make(map[string]string) + for i, tag := range tags { + if len(tag) != 2 { + return nil, fmt.Errorf("tag %d needs 2 values, has %d: %v", i+1, len(tag), tag) + } + if tag[0] == "" { + return nil, fmt.Errorf("tag %d has empty name", i+1) + } + if tag[1] == "" { + return nil, fmt.Errorf("tag %d has empty value", i+1) + } + if _, ok := m[tag[0]]; ok { + return nil, fmt.Errorf("tag %d has duplicate key: %v", i+1, tag[0]) + } + m[tag[0]] = tag[1] + } + return m, nil +} + //InitNodes Method on OpcUA func (o *OpcUA) InitNodes() error { - if len(o.NodeList) == 0 { - return nil + for _, node := range o.RootNodes { + o.nodes = append(o.nodes, Node{ + metricName: o.MetricName, + tag: node, + }) + } + + for _, group := range o.Groups { + if group.MetricName == "" { + group.MetricName = o.MetricName + } + groupTags, err := tagsSliceToMap(group.TagsSlice) + if err != nil { + return err + } + for _, node := range group.Nodes { + if node.Namespace == "" { + node.Namespace = group.Namespace + } + if node.IdentifierType == "" { + node.IdentifierType = group.IdentifierType + } + nodeTags, err := tagsSliceToMap(node.TagsSlice) + if err != nil { + return err + } + mergedTags := make(map[string]string) + for k, v := range groupTags { + mergedTags[k] = v + } + for k, v := range nodeTags { + mergedTags[k] = v + } + o.nodes = append(o.nodes, Node{ + metricName: group.MetricName, + tag: node, + metricTags: mergedTags, + }) + } } err := o.validateOPCTags() @@ -210,50 +315,83 @@ func (o *OpcUA) InitNodes() error { return nil } +type metricParts struct { + metricName string + fieldName string + tags string // sorted by tag name and in format tag1=value1, tag2=value2 +} + +func newMP(n *Node) metricParts { + var keys []string + for key := range n.metricTags { + keys = append(keys, key) + } + sort.Strings(keys) + var sb strings.Builder + for i, key := range keys { + if i != 0 { + // Writes to a string-builder will always succeed 
+ //nolint:errcheck,revive + sb.WriteString(", ") + } + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString(key) + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString("=") + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString(n.metricTags[key]) + } + x := metricParts{ + metricName: n.metricName, + fieldName: n.tag.FieldName, + tags: sb.String(), + } + return x +} + func (o *OpcUA) validateOPCTags() error { - nameEncountered := map[string]bool{} - for i, item := range o.NodeList { + nameEncountered := map[metricParts]struct{}{} + for _, node := range o.nodes { + mp := newMP(&node) //check empty name - if item.Name == "" { - return fmt.Errorf("empty name in '%s'", item.Name) + if node.tag.FieldName == "" { + return fmt.Errorf("empty name in '%s'", node.tag.FieldName) } //search name duplicate - if nameEncountered[item.Name] { - return fmt.Errorf("name '%s' is duplicated in '%s'", item.Name, item.Name) - } else { - nameEncountered[item.Name] = true + if _, ok := nameEncountered[mp]; ok { + return fmt.Errorf("name '%s' is duplicated (metric name '%s', tags '%s')", + mp.fieldName, mp.metricName, mp.tags) } + + //add it to the set + nameEncountered[mp] = struct{}{} + //search identifier type - switch item.IdentifierType { + switch node.tag.IdentifierType { case "s", "i", "g", "b": - break - default: - return fmt.Errorf("invalid identifier type '%s' in '%s'", item.IdentifierType, item.Name) - } - // search data type - switch item.DataType { - case "boolean", "byte", "short", "int", "uint", "uint16", "int16", "uint32", "int32", "float", "double", "string", "datetime", "number": - break + // Valid identifier type - do nothing. default: - return fmt.Errorf("invalid data type '%s' in '%s'", item.DataType, item.Name) + return fmt.Errorf("invalid identifier type '%s' in '%s'", node.tag.IdentifierType, node.tag.FieldName) } - // build nodeid - o.Nodes = append(o.Nodes, BuildNodeID(item)) + node.idStr = BuildNodeID(node.tag) //parse NodeIds and NodeIds errors - nid, niderr := ua.ParseNodeID(o.Nodes[i]) + nid, niderr := ua.ParseNodeID(node.idStr) // build NodeIds and Errors - o.NodeIDs = append(o.NodeIDs, nid) - o.NodeIDerror = append(o.NodeIDerror, niderr) + o.nodeIDs = append(o.nodeIDs, nid) + o.nodeIDerror = append(o.nodeIDerror, niderr) // Grow NodeData for later input - o.NodeData = append(o.NodeData, OPCData{}) + o.nodeData = append(o.nodeData, OPCData{}) } return nil } // BuildNodeID build node ID from OPC tag -func BuildNodeID(tag OPCTag) string { +func BuildNodeID(tag NodeSettings) string { return "ns=" + tag.Namespace + ";" + tag.IdentifierType + "=" + tag.Identifier } @@ -269,21 +407,25 @@ func Connect(o *OpcUA) error { o.state = Connecting if o.client != nil { - o.client.CloseSession() + if err := o.client.Close(); err != nil { + // Only log the error but to not bail-out here as this prevents + // reconnections for multiple parties (see e.g. #9523). + o.Log.Errorf("Closing connection failed: %v", err) + } } o.client = opcua.NewClient(o.Endpoint, o.opts...) 
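	// Establish the new session within the configured connect timeout.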
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) defer cancel() if err := o.client.Connect(ctx); err != nil { - return fmt.Errorf("Error in Client Connection: %s", err) + return fmt.Errorf("error in Client Connection: %s", err) } regResp, err := o.client.RegisterNodes(&ua.RegisterNodesRequest{ - NodesToRegister: o.NodeIDs, + NodesToRegister: o.nodeIDs, }) if err != nil { - return fmt.Errorf("RegisterNodes failed: %v", err) + return fmt.Errorf("registerNodes failed: %v", err) } o.req = &ua.ReadRequest{ @@ -294,7 +436,7 @@ func Connect(o *OpcUA) error { err = o.getData() if err != nil { - return fmt.Errorf("Get Data Failed: %v", err) + return fmt.Errorf("get Data Failed: %v", err) } default: @@ -304,43 +446,48 @@ func Connect(o *OpcUA) error { } func (o *OpcUA) setupOptions() error { - + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) + defer cancel() // Get a list of the endpoints for our target server - endpoints, err := opcua.GetEndpoints(o.Endpoint) + endpoints, err := opcua.GetEndpoints(ctx, o.Endpoint) if err != nil { - log.Fatal(err) + return err } if o.Certificate == "" && o.PrivateKey == "" { if o.SecurityPolicy != "None" || o.SecurityMode != "None" { - o.Certificate, o.PrivateKey = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) + o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, 365*24*time.Hour) + if err != nil { + return err + } } } - o.opts = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) + o.opts, err = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) - return nil + return err } func (o *OpcUA) getData() error { resp, err := o.client.Read(o.req) if err != nil { - o.ReadError++ + o.ReadError.Incr(1) return fmt.Errorf("RegisterNodes Read failed: %v", err) } - o.ReadSuccess++ + o.ReadSuccess.Incr(1) for i, d := range resp.Results { + o.nodeData[i].Quality = d.Status if d.Status != ua.StatusOK { - return fmt.Errorf("Status not OK: %v", d.Status) + o.Log.Errorf("status not OK for node %v: %v", o.nodes[i].tag.FieldName, d.Status) + continue } - o.NodeData[i].TagName = o.NodeList[i].Name + o.nodeData[i].TagName = o.nodes[i].tag.FieldName if d.Value != nil { - o.NodeData[i].Value = d.Value.Value() - o.NodeData[i].DataType = d.Value.Type() + o.nodeData[i].Value = d.Value.Value() + o.nodeData[i].DataType = d.Value.Type() } - o.NodeData[i].Quality = d.Status - o.NodeData[i].TimeStamp = d.ServerTimestamp.String() - o.NodeData[i].Time = d.SourceTimestamp.String() + o.nodeData[i].TimeStamp = d.ServerTimestamp.String() + o.nodeData[i].Time = d.SourceTimestamp.String() } return nil } @@ -359,13 +506,11 @@ func disconnect(o *OpcUA) error { return err } - o.ReadError = 0 - o.ReadSuccess = 0 - switch u.Scheme { case "opc.tcp": o.state = Disconnected o.client.Close() + o.client = nil return nil default: return fmt.Errorf("invalid controller") @@ -388,20 +533,26 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { err := o.getData() if err != nil && o.state == Connected { o.state = Disconnected + // Ignore returned error to not mask the original problem + //nolint:errcheck,revive disconnect(o) return err } - for i, n := range o.NodeList { - fields := 
make(map[string]interface{}) - tags := map[string]string{ - "name": n.Name, - "id": BuildNodeID(n), + for i, n := range o.nodes { + if o.nodeData[i].Quality == ua.StatusOK { + fields := make(map[string]interface{}) + tags := map[string]string{ + "id": n.idStr, + } + for k, v := range n.metricTags { + tags[k] = v + } + + fields[o.nodeData[i].TagName] = o.nodeData[i].Value + fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) + acc.AddFields(n.metricName, fields, tags) } - - fields[o.NodeData[i].TagName] = o.NodeData[i].Value - fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.NodeData[i].Quality)) - acc.AddFields(o.Name, fields, tags) } return nil } @@ -410,7 +561,7 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("opcua", func() telegraf.Input { return &OpcUA{ - Name: "localhost", + MetricName: "opcua", Endpoint: "opc.tcp://localhost:4840", SecurityPolicy: "auto", SecurityMode: "auto", diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 637ac87bc0afa..27bfc1ecf4342 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -6,8 +6,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/config" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) type OPCTags struct { @@ -15,33 +17,35 @@ type OPCTags struct { Namespace string IdentifierType string Identifier string - DataType string - Want string + Want interface{} } -func TestClient1(t *testing.T) { +func TestClient1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } var testopctags = []OPCTags{ - {"ProductName", "0", "i", "2261", "string", "open62541 OPC UA Server"}, - {"ProductUri", "0", "i", "2262", "string", "http://open62541.org"}, - {"ManufacturerName", "0", "i", "2263", "string", "open62541"}, + {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, + {"ProductUri", "0", "i", "2262", "http://open62541.org"}, + {"ManufacturerName", "0", "i", "2263", "open62541"}, + {"badnode", "1", "i", "1337", nil}, + {"goodnode", "1", "s", "the.answer", "42"}, } var o OpcUA var err error - o.Name = "testing" - o.Endpoint = "opc.tcp://opcua.rocks:4840" + o.MetricName = "testing" + o.Endpoint = "opc.tcp://localhost:4840" o.AuthMethod = "Anonymous" o.ConnectTimeout = config.Duration(10 * time.Second) o.RequestTimeout = config.Duration(1 * time.Second) o.SecurityPolicy = "None" o.SecurityMode = "None" + o.Log = testutil.Logger{} for _, tags := range testopctags { - o.NodeList = append(o.NodeList, MapOPCTag(tags)) + o.RootNodes = append(o.RootNodes, MapOPCTag(tags)) } err = o.Init() if err != nil { @@ -52,26 +56,25 @@ func TestClient1(t *testing.T) { t.Fatalf("Connect Error: %s", err) } - for i, v := range o.NodeData { + for i, v := range o.nodeData { if v.Value != nil { types := reflect.TypeOf(v.Value) value := reflect.ValueOf(v.Value) compare := fmt.Sprintf("%v", value.Interface()) if compare != testopctags[i].Want { - t.Errorf("Tag %s: Values %v for type %s does not match record", o.NodeList[i].Name, value.Interface(), types) + t.Errorf("Tag %s: Values %v for type %s does not match record", o.nodes[i].tag.FieldName, value.Interface(), types) } - } else { - t.Errorf("Tag: %s has value: %v", o.NodeList[i].Name, v.Value) + } else if testopctags[i].Want != nil { + t.Errorf("Tag: %s has value: %v", o.nodes[i].tag.FieldName, v.Value) } } } -func MapOPCTag(tags 
OPCTags) (out OPCTag) { - out.Name = tags.Name +func MapOPCTag(tags OPCTags) (out NodeSettings) { + out.FieldName = tags.Name out.Namespace = tags.Namespace out.IdentifierType = tags.IdentifierType out.Identifier = tags.Identifier - out.DataType = tags.DataType return out } @@ -90,9 +93,21 @@ auth_method = "Anonymous" username = "" password = "" nodes = [ - {name="name", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="name2", namespace="", identifier_type="", identifier="", data_type="", description=""}, + {name="name", namespace="1", identifier_type="s", identifier="one"}, + {name="name2", namespace="2", identifier_type="s", identifier="two"}, ] +[[inputs.opcua.group]] +name = "foo" +namespace = "3" +identifier_type = "i" +tags = [["tag1", "val1"], ["tag2", "val2"]] +nodes = [{name="name3", identifier="3000", tags=[["tag3", "val3"]]}] +[[inputs.opcua.group]] +name = "bar" +namespace = "0" +identifier_type = "i" +tags = [["tag1", "val1"], ["tag2", "val2"]] +nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] ` c := config.NewConfig() @@ -104,7 +119,144 @@ nodes = [ o, ok := c.Inputs[0].Input.(*OpcUA) require.True(t, ok) - require.Len(t, o.NodeList, 2) - require.Equal(t, o.NodeList[0].Name, "name") - require.Equal(t, o.NodeList[1].Name, "name2") + require.Len(t, o.RootNodes, 2) + require.Equal(t, o.RootNodes[0].FieldName, "name") + require.Equal(t, o.RootNodes[1].FieldName, "name2") + + require.Len(t, o.Groups, 2) + require.Equal(t, o.Groups[0].MetricName, "foo") + require.Len(t, o.Groups[0].Nodes, 1) + require.Equal(t, o.Groups[0].Nodes[0].Identifier, "3000") + + require.NoError(t, o.InitNodes()) + require.Len(t, o.nodes, 4) + require.Len(t, o.nodes[2].metricTags, 3) + require.Len(t, o.nodes[3].metricTags, 2) +} + +func TestTagsSliceToMap(t *testing.T) { + m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) + require.NoError(t, err) + require.Len(t, m, 2) + require.Equal(t, m["foo"], "bar") + require.Equal(t, m["baz"], "bat") +} + +func TestTagsSliceToMap_twoStrings(t *testing.T) { + var err error + _, err = tagsSliceToMap([][]string{{"foo", "bar", "baz"}}) + require.Error(t, err) + _, err = tagsSliceToMap([][]string{{"foo"}}) + require.Error(t, err) +} + +func TestTagsSliceToMap_dupeKey(t *testing.T) { + _, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"foo", "bat"}}) + require.Error(t, err) +} + +func TestTagsSliceToMap_empty(t *testing.T) { + _, err := tagsSliceToMap([][]string{{"foo", ""}}) + require.Equal(t, fmt.Errorf("tag 1 has empty value"), err) + _, err = tagsSliceToMap([][]string{{"", "bar"}}) + require.Equal(t, fmt.Errorf("tag 1 has empty name"), err) +} + +func TestValidateOPCTags(t *testing.T) { + tests := []struct { + name string + nodes []Node + err error + }{ + { + "same", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "v1", "t2": "v2"}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "v1", "t2": "v2"}, + }, + }, + fmt.Errorf("name 'fn' is duplicated (metric name 'mn', tags 't1=v1, t2=v2')"), + }, + { + "different metric tag names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t3": ""}, + }, + }, + nil, + 
}, + { + "different metric tag values", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "foo", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "bar", "t2": ""}, + }, + }, + nil, + }, + { + "different metric names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn2", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + }, + nil, + }, + { + "different field names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn2", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + }, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := OpcUA{ + nodes: tt.nodes, + Log: testutil.Logger{}, + } + require.Equal(t, tt.err, o.validateOPCTags()) + }) + } } diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index c0eac2483eb22..e1304fa304fc6 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" "log" "math/big" "net" @@ -27,16 +26,15 @@ import ( // SELF SIGNED CERT FUNCTIONS func newTempDir() (string, error) { - dir, err := ioutil.TempDir("", "ssc") + dir, err := os.MkdirTemp("", "ssc") return dir, err } -func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (string, string) { - +func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (cert string, key string, err error) { dir, _ := newTempDir() if len(host) == 0 { - log.Fatalf("Missing required host parameter") + return "", "", fmt.Errorf("missing required host parameter") } if rsaBits == 0 { rsaBits = 2048 @@ -50,7 +48,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D priv, err := rsa.GenerateKey(rand.Reader, rsaBits) if err != nil { - log.Fatalf("failed to generate private key: %s", err) + return "", "", fmt.Errorf("failed to generate private key: %s", err) } notBefore := time.Now() @@ -59,7 +57,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - log.Fatalf("failed to generate serial number: %s", err) + return "", "", fmt.Errorf("failed to generate serial number: %s", err) } template := x509.Certificate{ @@ -89,33 +87,36 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv) if err != nil { - log.Fatalf("Failed to create certificate: %s", err) + return "", "", fmt.Errorf("failed to create certificate: %s", err) } certOut, err := os.Create(certFile) if err != nil { - log.Fatalf("failed to open %s for writing: %s", certFile, err) + return "", "", fmt.Errorf("failed to open %s for writing: %s", certFile, err) } if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - log.Fatalf("failed to write data to 
%s: %s", certFile, err) + return "", "", fmt.Errorf("failed to write data to %s: %s", certFile, err) } if err := certOut.Close(); err != nil { - log.Fatalf("error closing %s: %s", certFile, err) + return "", "", fmt.Errorf("error closing %s: %s", certFile, err) } keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { - log.Printf("failed to open %s for writing: %s", keyFile, err) - return "", "" + return "", "", fmt.Errorf("failed to open %s for writing: %s", keyFile, err) + } + keyBlock, err := pemBlockForKey(priv) + if err != nil { + return "", "", fmt.Errorf("error generating block: %v", err) } - if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil { - log.Fatalf("failed to write data to %s: %s", keyFile, err) + if err := pem.Encode(keyOut, keyBlock); err != nil { + return "", "", fmt.Errorf("failed to write data to %s: %s", keyFile, err) } if err := keyOut.Close(); err != nil { - log.Fatalf("error closing %s: %s", keyFile, err) + return "", "", fmt.Errorf("error closing %s: %s", keyFile, err) } - return certFile, keyFile + return certFile, keyFile, nil } func publicKey(priv interface{}) interface{} { @@ -129,25 +130,23 @@ func publicKey(priv interface{}) interface{} { } } -func pemBlockForKey(priv interface{}) *pem.Block { +func pemBlockForKey(priv interface{}) (*pem.Block, error) { switch k := priv.(type) { case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil case *ecdsa.PrivateKey: b, err := x509.MarshalECPrivateKey(k) if err != nil { - fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) - os.Exit(2) + return nil, fmt.Errorf("unable to marshal ECDSA private key: %v", err) } - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil default: - return nil + return nil, nil } } -// OPT FUNCTIONS - -func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) []opcua.Option { +//revive:disable-next-line +func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) ([]opcua.Option, error) { opts := []opcua.Option{} appuri := "urn:telegraf:gopcua:client" appname := "Telegraf" @@ -158,9 +157,13 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, opts = append(opts, opcua.RequestTimeout(requestTimeout)) + var err error if certFile == "" && keyFile == "" { if policy != "None" || mode != "None" { - certFile, keyFile = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + if err != nil { + return nil, err + } } } @@ -173,7 +176,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } else { pk, ok := c.PrivateKey.(*rsa.PrivateKey) if !ok { - log.Fatalf("Invalid private key") + return nil, fmt.Errorf("invalid private key") } cert = c.Certificate[0] opts = append(opts, opcua.PrivateKey(pk), opcua.Certificate(cert)) @@ -191,11 +194,15 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, secPolicy = ua.SecurityPolicyURIPrefix + policy policy = "" default: - log.Fatalf("Invalid security policy: %s", policy) + return nil, fmt.Errorf("invalid security 
policy: %s", policy) } // Select the most appropriate authentication mode from server capabilities and user input - authMode, authOption := generateAuth(auth, cert, username, password) + authMode, authOption, err := generateAuth(auth, cert, username, password) + if err != nil { + return nil, err + } + opts = append(opts, authOption) var secMode ua.MessageSecurityMode @@ -211,7 +218,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, secMode = ua.MessageSecurityModeSignAndEncrypt mode = "" default: - log.Fatalf("Invalid security mode: %s", mode) + return nil, fmt.Errorf("invalid security mode: %s", mode) } // Allow input of only one of sec-mode,sec-policy when choosing 'None' @@ -253,24 +260,23 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } if serverEndpoint == nil { // Didn't find an endpoint with matching policy and mode. - log.Printf("unable to find suitable server endpoint with selected sec-policy and sec-mode") - log.Fatalf("quitting") + return nil, fmt.Errorf("unable to find suitable server endpoint with selected sec-policy and sec-mode") } secPolicy = serverEndpoint.SecurityPolicyURI secMode = serverEndpoint.SecurityMode // Check that the selected endpoint is a valid combo - err := validateEndpointConfig(endpoints, secPolicy, secMode, authMode) + err = validateEndpointConfig(endpoints, secPolicy, secMode, authMode) if err != nil { - log.Fatalf("error validating input: %s", err) + return nil, fmt.Errorf("error validating input: %s", err) } opts = append(opts, opcua.SecurityFromEndpoint(serverEndpoint, authMode)) - return opts + return opts, nil } -func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option) { +func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { var err error var authMode ua.UserTokenType @@ -285,13 +291,13 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua if un == "" { if err != nil { - log.Fatalf("error reading username input: %s", err) + return 0, nil, fmt.Errorf("error reading the username input: %s", err) } } if pw == "" { if err != nil { - log.Fatalf("error reading username input: %s", err) + return 0, nil, fmt.Errorf("error reading the password input: %s", err) } } @@ -310,10 +316,9 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua log.Printf("unknown auth-mode, defaulting to Anonymous") authMode = ua.UserTokenTypeAnonymous authOption = opcua.AuthAnonymous() - } - return authMode, authOption + return authMode, authOption, nil } func validateEndpointConfig(endpoints []*ua.EndpointDescription, secPolicy string, secMode ua.MessageSecurityMode, authMode ua.UserTokenType) error { diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index d5ed7e4cc1c3f..7a3f766718c52 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,10 +5,11 @@ import ( "strconv" "strings" + "gopkg.in/ldap.v3" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/ldap.v3" ) type Openldap struct { @@ -110,13 +111,15 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - if o.TLS == "ldaps" { + + switch o.TLS { + case "ldaps": l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port), tlsConfig) if err != nil { acc.AddError(err) return nil } - } else if o.TLS == "starttls" { 
+ case "starttls": l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port)) if err != nil { acc.AddError(err) @@ -127,8 +130,8 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - } else { - acc.AddError(fmt.Errorf("Invalid setting for ssl: %s", o.TLS)) + default: + acc.AddError(fmt.Errorf("invalid setting for ssl: %s", o.TLS)) return nil } } else { @@ -190,7 +193,6 @@ func gatherSearchResult(sr *ldap.SearchResult, o *Openldap, acc telegraf.Accumul } } acc.AddFields("openldap", fields, tags) - return } // Convert a DN to metric name, eg cn=Read,cn=Waiters,cn=Monitor becomes waiters_read @@ -208,15 +210,15 @@ func dnToMetric(dn string, o *Openldap) string { metricParts[i], metricParts[j] = metricParts[j], metricParts[i] } return strings.Join(metricParts[1:], "_") - } else { - metricName := strings.Trim(dn, " ") - metricName = strings.Replace(metricName, " ", "_", -1) - metricName = strings.ToLower(metricName) - metricName = strings.TrimPrefix(metricName, "cn=") - metricName = strings.Replace(metricName, strings.ToLower("cn=Monitor"), "", -1) - metricName = strings.Replace(metricName, "cn=", "_", -1) - return strings.Replace(metricName, ",", "", -1) } + + metricName := strings.Trim(dn, " ") + metricName = strings.Replace(metricName, " ", "_", -1) + metricName = strings.ToLower(metricName) + metricName = strings.TrimPrefix(metricName, "cn=") + metricName = strings.Replace(metricName, strings.ToLower("cn=Monitor"), "", -1) + metricName = strings.Replace(metricName, "cn=", "_", -1) + return strings.Replace(metricName, ",", "", -1) } func init() { diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index 76d9cc3a9dd42..ac9e810f0b49e 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,10 +4,10 @@ import ( "strconv" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/ldap.v3" + + "github.com/influxdata/telegraf/testutil" ) func TestOpenldapMockResult(t *testing.T) { @@ -33,7 +33,7 @@ func TestOpenldapMockResult(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapNoConnection(t *testing.T) { +func TestOpenldapNoConnectionIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -45,15 +45,13 @@ func TestOpenldapNoConnection(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } -func TestOpenldapGeneratesMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapGeneratesMetricsIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -66,10 +64,8 @@ func TestOpenldapGeneratesMetrics(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapStartTLS(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapStartTLSIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP 
response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -84,10 +80,8 @@ func TestOpenldapStartTLS(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapLDAPS(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapLDAPSIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -102,10 +96,8 @@ func TestOpenldapLDAPS(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapInvalidSSL(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapInvalidSSLIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -116,15 +108,13 @@ func TestOpenldapInvalidSSL(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } -func TestOpenldapBind(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapBindIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -142,17 +132,15 @@ func TestOpenldapBind(t *testing.T) { } func commonTests(t *testing.T, o *Openldap, acc *testutil.Accumulator) { - assert.Empty(t, acc.Errors, "accumulator had no errors") - assert.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") - assert.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") - assert.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") - assert.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") + require.Empty(t, acc.Errors, "accumulator had no errors") + require.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") + require.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") + require.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") + require.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") } -func TestOpenldapReverseMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapReverseMetricsIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -167,5 +155,5 @@ func TestOpenldapReverseMetrics(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") + require.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") } diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index 
e7723b480a581..2689c9cc7c845 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -10,16 +10,11 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -// Mapping of ntpctl header names to tag keys -var tagHeaders = map[string]string{ - "st": "stratum", -} - // Mapping of the ntpctl tag key to the index in the command output var tagI = map[string]int{ "stratum": 2, @@ -40,20 +35,19 @@ var intI = map[string]int{ "poll": 4, } -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) // Openntpd is used to store configuration values type Openntpd struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/ntpctl" -var defaultTimeout = internal.Duration{Duration: 5 * time.Second} +var defaultTimeout = config.Duration(5 * time.Second) func (n *Openntpd) Description() string { return "Get standard NTP query metrics from OpenNTPD." @@ -73,19 +67,19 @@ func (n *Openntpd) SampleConfig() string { } // Shell out to ntpctl and return the output -func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { cmdArgs := []string{"-s", "peers"} cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) 
} var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running ntpctl: %s", err) } @@ -133,8 +127,8 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { fields = strings.Fields(line) // if there is an ntpctl state prefix, remove it and make it it's own tag - if strings.ContainsAny(string(fields[0]), "*") { - tags["state_prefix"] = string(fields[0]) + if strings.ContainsAny(fields[0], "*") { + tags["state_prefix"] = fields[0] fields = fields[1:] } @@ -156,16 +150,13 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { } if key == "next" || key == "poll" { - m, err := strconv.ParseInt(strings.TrimSuffix(fields[index], "s"), 10, 64) if err != nil { acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) continue } mFields[key] = m - } else { - m, err := strconv.ParseInt(fields[index], 10, 64) if err != nil { acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) @@ -185,23 +176,19 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { } if key == "offset" || key == "delay" || key == "jitter" { - m, err := strconv.ParseFloat(strings.TrimSuffix(fields[index], "ms"), 64) if err != nil { acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) continue } mFields[key] = m - } else { - m, err := strconv.ParseFloat(fields[index], 64) if err != nil { acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) continue } mFields[key] = m - } } acc.AddFields("openntpd", mFields, tags) diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index d629949a533c4..ffca02b31a908 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,17 +3,15 @@ package openntpd import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { + return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,15 +19,15 @@ func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(st func TestParseSimpleOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutput, TestTimeout, false), + run: OpenntpdCTL(simpleOutput), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -52,15 +50,15 @@ func TestParseSimpleOutput(t *testing.T) { func TestParseSimpleOutputwithStatePrefix(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputwithStatePrefix, TestTimeout, false), + run: 
OpenntpdCTL(simpleOutputwithStatePrefix), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -84,15 +82,15 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { func TestParseSimpleOutputInvalidPeer(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false), + run: OpenntpdCTL(simpleOutputInvalidPeer), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -112,15 +110,15 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { func TestParseSimpleOutputServersDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServersDNSError, TestTimeout, false), + run: OpenntpdCTL(simpleOutputServersDNSError), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(2), @@ -154,15 +152,15 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { func TestParseSimpleOutputServerDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServerDNSError, TestTimeout, false), + run: OpenntpdCTL(simpleOutputServerDNSError), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(12), @@ -182,15 +180,15 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(fullOutput, TestTimeout, false), + run: OpenntpdCTL(fullOutput), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(20)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(20)) - assert.Equal(t, acc.NFields(), 113) + require.Equal(t, acc.NFields(), 113) firstpeerfields := map[string]interface{}{ "wt": int64(1), diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index c3f76f2efa850..9ce6ec5421ff1 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -10,25 +10,25 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" 
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) // Opensmtpd is used to store configuration values type Opensmtpd struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/smtpctl" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: @@ -51,19 +51,19 @@ func (s *Opensmtpd) SampleConfig() string { } // Shell out to opensmtpd_stat and return the output -func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { cmdArgs := []string{"show", "stats"} cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running smtpctl: %s", err) } @@ -77,8 +77,8 @@ func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (* // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { // Always exclude uptime.human statistics - stat_excluded := []string{"uptime.human"} - filter_excluded, err := filter.Compile(stat_excluded) + statExcluded := []string{"uptime.human"} + filterExcluded, err := filter.Compile(statExcluded) if err != nil { return err } @@ -92,7 +92,6 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(out) for scanner.Scan() { - cols := strings.Split(scanner.Text(), "=") // Check split correctness @@ -104,7 +103,7 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { value := cols[1] // Filter value - if filter_excluded.Match(stat) { + if filterExcluded.Match(stat) { continue } @@ -112,8 +111,7 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { fields[field], err = strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v\n", - stat, value)) + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) } } diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 42e978b6c34e7..3b625be51cef2 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -3,17 +3,15 @@ package opensmtpd import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool) 
(*bytes.Buffer, error) {
+func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) {
+	return func(string, config.Duration, bool) (*bytes.Buffer, error) {
 		return bytes.NewBuffer([]byte(output)), nil
 	}
 }
@@ -21,15 +19,15 @@ func TestFilterSomeStats(t *testing.T) {
 	acc := &testutil.Accumulator{}
 	v := &Opensmtpd{
-		run: SmtpCTL(fullOutput, TestTimeout, false),
+		run: SMTPCTL(fullOutput),
 	}
 
 	err := v.Gather(acc)
 
-	assert.NoError(t, err)
-	assert.True(t, acc.HasMeasurement("opensmtpd"))
-	assert.Equal(t, acc.NMetrics(), uint64(1))
+	require.NoError(t, err)
+	require.True(t, acc.HasMeasurement("opensmtpd"))
+	require.Equal(t, acc.NMetrics(), uint64(1))
 
-	assert.Equal(t, acc.NFields(), 36)
+	require.Equal(t, acc.NFields(), 36)
 
 	acc.AssertContainsFields(t, "opensmtpd", parsedFullOutput)
 }
diff --git a/plugins/inputs/opentelemetry/README.md b/plugins/inputs/opentelemetry/README.md
new file mode 100644
index 0000000000000..20cc36d5d0403
--- /dev/null
+++ b/plugins/inputs/opentelemetry/README.md
@@ -0,0 +1,92 @@
+# OpenTelemetry Input Plugin
+
+This plugin receives traces, metrics, and logs from [OpenTelemetry](https://opentelemetry.io) clients and agents via gRPC.
+
+### Configuration
+
+```toml
+[[inputs.opentelemetry]]
+  ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
+  ## address:port
+  # service_address = "0.0.0.0:4317"
+
+  ## Override the default (5s) new connection timeout
+  # timeout = "5s"
+
+  ## Override the default (prometheus-v1) metrics schema.
+  ## Supports: "prometheus-v1", "prometheus-v2"
+  ## For more information about the alternatives, read the Prometheus input
+  ## plugin notes.
+  # metrics_schema = "prometheus-v1"
+
+  ## Optional TLS Config.
+  ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
+  ##
+  ## Set one or more allowed client CA certificate file names to
+  ## enable mutually authenticated TLS connections.
+  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+  ## Add service certificate and key.
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+```
+
+#### Schema
+
+The OpenTelemetry->InfluxDB conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md)
+and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/otel2influx)
+are hosted at https://github.com/influxdata/influxdb-observability.
+
+Spans are stored in measurement `spans`.
+Logs are stored in measurement `logs`.
+
+For metrics, two output schemata exist.
+Metrics received with `metrics_schema=prometheus-v1` take their measurement name from the OTel field `Metric.name`.
+Metrics received with `metrics_schema=prometheus-v2` are stored in measurement `prometheus`.
+
+Also see the OpenTelemetry output plugin for Telegraf.
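+
+A client can point the OpenTelemetry Go SDK's OTLP gRPC exporter at this
+plugin (this plugin's own tests use the same exporter). A minimal,
+illustrative sketch, assuming the default `service_address` and no TLS:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+)
+
+func main() {
+	ctx := context.Background()
+	// Connect to Telegraf's OpenTelemetry input at its service_address.
+	// WithInsecure skips TLS; pair tls_cert/tls_key on the Telegraf side
+	// with gRPC transport credentials here for production use.
+	exporter, err := otlpmetricgrpc.New(ctx,
+		otlpmetricgrpc.WithEndpoint("localhost:4317"),
+		otlpmetricgrpc.WithInsecure(),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() { _ = exporter.Shutdown(ctx) }()
+	// Register the exporter with an SDK metrics controller and record
+	// measurements as usual; they arrive in Telegraf as metrics.
+}
+```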
+
+### Example Output
+
+#### Tracing Spans
+```
+spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000
+spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000
+spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000
+spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="b57e98af78c3399b",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689303300
+spans end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="6a8e6a0edcc1c966",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="d68f7f3b41eb8075",status_code="STATUS_CODE_OK",trace_id="651dadde186b7834c52b13a28fc27bea" 1613767825689480300
+```
+
+#### Metrics - `prometheus-v1`
+```
+cpu_temp,foo=bar gauge=87.332
+http_requests_total,method=post,code=200 counter=1027
+http_requests_total,method=post,code=400 counter=3
+http_request_duration_seconds 0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,sum=53423,count=144320
+rpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560473e+07,count=2693
+```
+
+#### Metrics - `prometheus-v2`
+```
+prometheus,foo=bar cpu_temp=87.332
+prometheus,method=post,code=200 http_requests_total=1027
+prometheus,method=post,code=400 http_requests_total=3
+prometheus,le=0.05 http_request_duration_seconds_bucket=24054
+prometheus,le=0.1 http_request_duration_seconds_bucket=33444
+prometheus,le=0.2 http_request_duration_seconds_bucket=100392
+prometheus,le=0.5 http_request_duration_seconds_bucket=129389
+prometheus,le=1 http_request_duration_seconds_bucket=133988
+prometheus http_request_duration_seconds_count=144320,http_request_duration_seconds_sum=53423
+prometheus,quantile=0.01 rpc_duration_seconds=3102
+prometheus,quantile=0.05 rpc_duration_seconds=3272
+prometheus,quantile=0.5 rpc_duration_seconds=4773
+prometheus,quantile=0.9 rpc_duration_seconds=9001
+prometheus,quantile=0.99 rpc_duration_seconds=76656
+prometheus rpc_duration_seconds_count=2693,rpc_duration_seconds_sum=1.7560473e+07
+```
+
+#### Logs
+```
+logs fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700
+logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200
+logs fluent.tag="fluent.info",worker=0i
1613769568896515100 +``` diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go new file mode 100644 index 0000000000000..437c723db3e28 --- /dev/null +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -0,0 +1,83 @@ +package opentelemetry + +import ( + "context" + "fmt" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/influxdb-observability/otel2influx" + "go.opentelemetry.io/collector/model/otlpgrpc" +) + +type traceService struct { + converter *otel2influx.OtelTracesToLineProtocol + writer *writeToAccumulator +} + +var _ otlpgrpc.TracesServer = (*traceService)(nil) + +func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceService { + converter := otel2influx.NewOtelTracesToLineProtocol(logger) + return &traceService{ + converter: converter, + writer: writer, + } +} + +func (s *traceService) Export(ctx context.Context, req otlpgrpc.TracesRequest) (otlpgrpc.TracesResponse, error) { + err := s.converter.WriteTraces(ctx, req.Traces(), s.writer) + return otlpgrpc.NewTracesResponse(), err +} + +type metricsService struct { + converter *otel2influx.OtelMetricsToLineProtocol + writer *writeToAccumulator +} + +var _ otlpgrpc.MetricsServer = (*metricsService)(nil) + +var metricsSchemata = map[string]common.MetricsSchema{ + "prometheus-v1": common.MetricsSchemaTelegrafPrometheusV1, + "prometheus-v2": common.MetricsSchemaTelegrafPrometheusV2, +} + +func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) { + ms, found := metricsSchemata[schema] + if !found { + return nil, fmt.Errorf("schema '%s' not recognized", schema) + } + + converter, err := otel2influx.NewOtelMetricsToLineProtocol(logger, ms) + if err != nil { + return nil, err + } + return &metricsService{ + converter: converter, + writer: writer, + }, nil +} + +func (s *metricsService) Export(ctx context.Context, req otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + err := s.converter.WriteMetrics(ctx, req.Metrics(), s.writer) + return otlpgrpc.NewMetricsResponse(), err +} + +type logsService struct { + converter *otel2influx.OtelLogsToLineProtocol + writer *writeToAccumulator +} + +var _ otlpgrpc.LogsServer = (*logsService)(nil) + +func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsService { + converter := otel2influx.NewOtelLogsToLineProtocol(logger) + return &logsService{ + converter: converter, + writer: writer, + } +} + +func (s *logsService) Export(ctx context.Context, req otlpgrpc.LogsRequest) (otlpgrpc.LogsResponse, error) { + err := s.converter.WriteLogs(ctx, req.Logs(), s.writer) + return otlpgrpc.NewLogsResponse(), err +} diff --git a/plugins/inputs/opentelemetry/logger.go b/plugins/inputs/opentelemetry/logger.go new file mode 100644 index 0000000000000..3db3621bcc672 --- /dev/null +++ b/plugins/inputs/opentelemetry/logger.go @@ -0,0 +1,16 @@ +package opentelemetry + +import ( + "strings" + + "github.com/influxdata/telegraf" +) + +type otelLogger struct { + telegraf.Logger +} + +func (l otelLogger) Debug(msg string, kv ...interface{}) { + format := msg + strings.Repeat(" %s=%q", len(kv)/2) + l.Logger.Debugf(format, kv...) 
+} diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go new file mode 100644 index 0000000000000..85f32a7695efa --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -0,0 +1,127 @@ +package opentelemetry + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "go.opentelemetry.io/collector/model/otlpgrpc" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type OpenTelemetry struct { + ServiceAddress string `toml:"service_address"` + MetricsSchema string `toml:"metrics_schema"` + + tls.ServerConfig + Timeout config.Duration `toml:"timeout"` + + Log telegraf.Logger `toml:"-"` + + listener net.Listener // overridden in tests + grpcServer *grpc.Server + + wg sync.WaitGroup +} + +const sampleConfig = ` + ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service + ## address:port + # service_address = "0.0.0.0:4317" + + ## Override the default (5s) new connection timeout + # timeout = "5s" + + ## Override the default (prometheus-v1) metrics schema. + ## Supports: "prometheus-v1", "prometheus-v2" + ## For more information about the alternatives, read the Prometheus input + ## plugin notes. + # metrics_schema = "prometheus-v1" + + ## Optional TLS Config. + ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md + ## + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Add service certificate and key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +` + +func (o *OpenTelemetry) SampleConfig() string { + return sampleConfig +} + +func (o *OpenTelemetry) Description() string { + return "Receive OpenTelemetry traces, metrics, and logs over gRPC" +} + +func (o *OpenTelemetry) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { + var grpcOptions []grpc.ServerOption + if tlsConfig, err := o.ServerConfig.TLSConfig(); err != nil { + return err + } else if tlsConfig != nil { + grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(tlsConfig))) + } + if o.Timeout > 0 { + grpcOptions = append(grpcOptions, grpc.ConnectionTimeout(time.Duration(o.Timeout))) + } + + logger := &otelLogger{o.Log} + influxWriter := &writeToAccumulator{accumulator} + o.grpcServer = grpc.NewServer(grpcOptions...) 
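+	// The trace, metrics, and logs services below share this gRPC server;
+	// each converts incoming OTLP payloads to line protocol and writes
+	// them through the Telegraf accumulator.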
+ + otlpgrpc.RegisterTracesServer(o.grpcServer, newTraceService(logger, influxWriter)) + ms, err := newMetricsService(logger, influxWriter, o.MetricsSchema) + if err != nil { + return err + } + otlpgrpc.RegisterMetricsServer(o.grpcServer, ms) + otlpgrpc.RegisterLogsServer(o.grpcServer, newLogsService(logger, influxWriter)) + + if o.listener == nil { + o.listener, err = net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } + } + + o.wg.Add(1) + go func() { + if err := o.grpcServer.Serve(o.listener); err != nil { + accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) + } + o.wg.Done() + }() + + return nil +} + +func (o *OpenTelemetry) Stop() { + if o.grpcServer != nil { + o.grpcServer.Stop() + } + + o.wg.Wait() +} + +func init() { + inputs.Add("opentelemetry", func() telegraf.Input { + return &OpenTelemetry{ + ServiceAddress: "0.0.0.0:4317", + MetricsSchema: "prometheus-v1", + Timeout: config.Duration(5 * time.Second), + } + }) +} diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..4704d779dfd49 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,82 @@ +package opentelemetry + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" +) + +func TestOpenTelemetry(t *testing.T) { + mockListener := bufconn.Listen(1024 * 1024) + plugin := inputs.Inputs["opentelemetry"]().(*OpenTelemetry) + plugin.listener = mockListener + accumulator := new(testutil.Accumulator) + + err := plugin.Start(accumulator) + require.NoError(t, err) + t.Cleanup(plugin.Stop) + + metricExporter, err := otlpmetricgrpc.New(context.Background(), + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithDialOption( + grpc.WithBlock(), + grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + return mockListener.Dial() + })), + ) + require.NoError(t, err) + t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) + + pusher := controller.New( + processor.NewFactory( + simple.NewWithExactDistribution(), + metricExporter, + ), + controller.WithExporter(metricExporter), + ) + + err = pusher.Start(context.Background()) + require.NoError(t, err) + t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) + + global.SetMeterProvider(pusher) + + // write metrics + meter := global.Meter("library-name") + counter := metric.Must(meter).NewInt64Counter("measurement-counter") + meter.RecordBatch(context.Background(), nil, counter.Measurement(7)) + + err = pusher.Stop(context.Background()) + require.NoError(t, err) + + // Shutdown + + plugin.Stop() + + err = metricExporter.Shutdown(context.Background()) + require.NoError(t, err) + + // Check + + require.Empty(t, accumulator.Errors) + + require.Len(t, accumulator.Metrics, 1) + got := accumulator.Metrics[0] + require.Equal(t, "measurement-counter", got.Measurement) + require.Equal(t, telegraf.Counter, got.Type) + 
require.Equal(t, "library-name", got.Tags["otel.library.name"]) +} diff --git a/plugins/inputs/opentelemetry/writer.go b/plugins/inputs/opentelemetry/writer.go new file mode 100644 index 0000000000000..58906e62aa391 --- /dev/null +++ b/plugins/inputs/opentelemetry/writer.go @@ -0,0 +1,32 @@ +package opentelemetry + +import ( + "context" + "fmt" + "time" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/telegraf" +) + +type writeToAccumulator struct { + accumulator telegraf.Accumulator +} + +func (w *writeToAccumulator) WritePoint(_ context.Context, measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType common.InfluxMetricValueType) error { + switch vType { + case common.InfluxMetricValueTypeUntyped: + w.accumulator.AddFields(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeGauge: + w.accumulator.AddGauge(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeSum: + w.accumulator.AddCounter(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeHistogram: + w.accumulator.AddHistogram(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeSummary: + w.accumulator.AddSummary(measurement, fields, tags, ts) + default: + return fmt.Errorf("unrecognized InfluxMetricValueType %q", vType) + } + return nil +} diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 94055a6f8bb6a..c4f2f4f032d7e 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -13,7 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -21,25 +21,25 @@ const ( // https://openweathermap.org/current#severalid // Call for several city IDs // The limit of locations is 20. 
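+	// Current-weather requests are therefore batched in groups of at
+	// most owmRequestSeveralCityID city IDs (see Gather below).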
- owmRequestSeveralCityId int = 20 + owmRequestSeveralCityID int = 20 - defaultBaseUrl = "https://api.openweathermap.org/" - defaultResponseTimeout time.Duration = time.Second * 5 - defaultUnits string = "metric" - defaultLang string = "en" + defaultBaseURL = "https://api.openweathermap.org/" + defaultResponseTimeout = time.Second * 5 + defaultUnits = "metric" + defaultLang = "en" ) type OpenWeatherMap struct { - AppId string `toml:"app_id"` - CityId []string `toml:"city_id"` - Lang string `toml:"lang"` - Fetch []string `toml:"fetch"` - BaseUrl string `toml:"base_url"` - ResponseTimeout internal.Duration `toml:"response_timeout"` - Units string `toml:"units"` - - client *http.Client - baseUrl *url.URL + AppID string `toml:"app_id"` + CityID []string `toml:"city_id"` + Lang string `toml:"lang"` + Fetch []string `toml:"fetch"` + BaseURL string `toml:"base_url"` + ResponseTimeout config.Duration `toml:"response_timeout"` + Units string `toml:"units"` + + client *http.Client + baseParsedURL *url.URL } var sampleConfig = ` @@ -87,12 +87,12 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { for _, fetch := range n.Fetch { if fetch == "forecast" { - for _, city := range n.CityId { + for _, city := range n.CityID { addr := n.formatURL("/data/2.5/forecast", city) wg.Add(1) go func() { defer wg.Done() - status, err := n.gatherUrl(addr) + status, err := n.gatherURL(addr) if err != nil { acc.AddError(err) return @@ -103,10 +103,10 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } } else if fetch == "weather" { j := 0 - for j < len(n.CityId) { + for j < len(n.CityID) { strs = make([]string, 0) - for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { - strs = append(strs, n.CityId[j]) + for i := 0; j < len(n.CityID) && i < owmRequestSeveralCityID; i++ { + strs = append(strs, n.CityID[j]) j++ } cities := strings.Join(strs, ",") @@ -115,7 +115,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func() { defer wg.Done() - status, err := n.gatherUrl(addr) + status, err := n.gatherURL(addr) if err != nil { acc.AddError(err) return @@ -124,7 +124,6 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { gatherWeather(acc, status) }() } - } } @@ -132,20 +131,20 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { return nil } -func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = defaultResponseTimeout +func (n *OpenWeatherMap) createHTTPClient() *http.Client { + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(defaultResponseTimeout) } client := &http.Client{ Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } - return client, nil + return client } -func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { +func (n *OpenWeatherMap) gatherURL(addr string) (*Status, error) { resp, err := n.client.Get(addr) if err != nil { return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err) @@ -165,7 +164,7 @@ func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType) } - return gatherWeatherUrl(resp.Body) + return gatherWeatherURL(resp.Body) } type WeatherEntry struct { @@ -191,7 +190,7 @@ type WeatherEntry struct { Deg float64 `json:"deg"` Speed float64 `json:"speed"` } `json:"wind"` - Id int64 
`json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Coord struct { Lat float64 `json:"lat"` @@ -213,13 +212,13 @@ type Status struct { Lon float64 `json:"lon"` } `json:"coord"` Country string `json:"country"` - Id int64 `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` } `json:"city"` List []WeatherEntry `json:"list"` } -func gatherWeatherUrl(r io.Reader) (*Status, error) { +func gatherWeatherURL(r io.Reader) (*Status, error) { dec := json.NewDecoder(r) status := &Status{} if err := dec.Decode(status); err != nil { @@ -253,7 +252,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { } tags := map[string]string{ "city": e.Name, - "city_id": strconv.FormatInt(e.Id, 10), + "city_id": strconv.FormatInt(e.ID, 10), "country": e.Sys.Country, "forecast": "*", } @@ -271,7 +270,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { func gatherForecast(acc telegraf.Accumulator, status *Status) { tags := map[string]string{ - "city_id": strconv.FormatInt(status.City.Id, 10), + "city_id": strconv.FormatInt(status.City.ID, 10), "forecast": "*", "city": status.City.Name, "country": status.City.Country, @@ -300,29 +299,24 @@ func gatherForecast(acc telegraf.Accumulator, status *Status) { func init() { inputs.Add("openweathermap", func() telegraf.Input { - tmout := internal.Duration{ - Duration: defaultResponseTimeout, - } + tmout := config.Duration(defaultResponseTimeout) return &OpenWeatherMap{ ResponseTimeout: tmout, - BaseUrl: defaultBaseUrl, + BaseURL: defaultBaseURL, } }) } func (n *OpenWeatherMap) Init() error { var err error - n.baseUrl, err = url.Parse(n.BaseUrl) + n.baseParsedURL, err = url.Parse(n.BaseURL) if err != nil { return err } // Create an HTTP client that is re-used for each // collection interval - n.client, err = n.createHttpClient() - if err != nil { - return err - } + n.client = n.createHTTPClient() switch n.Units { case "imperial", "standard", "metric": @@ -349,7 +343,7 @@ func (n *OpenWeatherMap) Init() error { func (n *OpenWeatherMap) formatURL(path string, city string) string { v := url.Values{ "id": []string{city}, - "APPID": []string{n.AppId}, + "APPID": []string{n.AppID}, "lang": []string{n.Lang}, "units": []string{n.Units}, } @@ -359,5 +353,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string { RawQuery: v.Encode(), } - return n.baseUrl.ResolveReference(relative).String() + return n.baseParsedURL.ResolveReference(relative).String() } diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index 9bee1d2e96199..0e86646a27594 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -408,26 +408,26 @@ func TestForecastGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/group" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"2988507"}, Fetch: []string{"weather", "forecast"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -492,26 +492,26 @@ func TestWeatherGeneratesMetrics(t 
*testing.T) { } else if r.URL.Path == "/data/2.5/forecast" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"2988507"}, Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -552,26 +552,26 @@ func TestRainMetrics(t *testing.T) { rsp = rainWeatherResponse w.Header()["Content-Type"] = []string{"application/json"} } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"111", "222", "333", "444"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"111", "222", "333", "444"}, Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ // City with 1h rain value @@ -695,26 +695,26 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/forecast" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"524901", "703448", "2643743"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"524901", "703448", "2643743"}, Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -803,28 +803,28 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { func TestFormatURL(t *testing.T) { n := &OpenWeatherMap{ - AppId: "appid", - Units: "units", - Lang: "lang", - BaseUrl: "http://foo.com", + AppID: "appid", + Units: "metric", + Lang: "de", + BaseURL: "http://foo.com", } - n.Init() + require.NoError(t, n.Init()) require.Equal(t, - "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=lang&units=units", + "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=de&units=metric", n.formatURL("/data/2.5/forecast", "12345")) } func TestDefaultUnits(t *testing.T) { n := &OpenWeatherMap{} - n.Init() + require.NoError(t, n.Init()) require.Equal(t, "metric", n.Units) } func TestDefaultLang(t *testing.T) { n := &OpenWeatherMap{} - n.Init() + require.NoError(t, n.Init()) require.Equal(t, "en", n.Lang) } diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md index 688f4e69aef0d..6821635103d78 100644 --- a/plugins/inputs/passenger/README.md +++ b/plugins/inputs/passenger/README.md @@ -15,9 +15,6 @@ manage your series cardinality: `tagexclude` to remove the `pid` and `process_group_id` tags. 
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/). -- Limit series cardinality in your database using the - [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and - [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings. - Consider using the [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/). - Monitor your databases diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index f00bfc824de28..fbd016af60a0e 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -32,27 +32,27 @@ func (p *passenger) parseCommand() (string, []string) { } type info struct { - Passenger_version string `xml:"passenger_version"` - Process_count int `xml:"process_count"` - Capacity_used int `xml:"capacity_used"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Max int `xml:"max"` - Supergroups struct { + PassengerVersion string `xml:"passenger_version"` + ProcessCount int `xml:"process_count"` + CapacityUsed int `xml:"capacity_used"` + GetWaitListSize int `xml:"get_wait_list_size"` + Max int `xml:"max"` + Supergroups struct { Supergroup []struct { - Name string `xml:"name"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Capacity_used int `xml:"capacity_used"` - Group []struct { - Name string `xml:"name"` - AppRoot string `xml:"app_root"` - AppType string `xml:"app_type"` - Enabled_process_count int `xml:"enabled_process_count"` - Disabling_process_count int `xml:"disabling_process_count"` - Disabled_process_count int `xml:"disabled_process_count"` - Capacity_used int `xml:"capacity_used"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Processes_being_spawned int `xml:"processes_being_spawned"` - Processes struct { + Name string `xml:"name"` + GetWaitListSize int `xml:"get_wait_list_size"` + CapacityUsed int `xml:"capacity_used"` + Group []struct { + Name string `xml:"name"` + AppRoot string `xml:"app_root"` + AppType string `xml:"app_type"` + EnabledProcessCount int `xml:"enabled_process_count"` + DisablingProcessCount int `xml:"disabling_process_count"` + DisabledProcessCount int `xml:"disabled_process_count"` + CapacityUsed int `xml:"capacity_used"` + GetWaitListSize int `xml:"get_wait_list_size"` + ProcessesBeingSpawned int `xml:"processes_being_spawned"` + Processes struct { Process []*process `xml:"process"` } `xml:"processes"` } `xml:"group"` @@ -61,28 +61,28 @@ type info struct { } type process struct { - Pid int `xml:"pid"` - Concurrency int `xml:"concurrency"` - Sessions int `xml:"sessions"` - Busyness int `xml:"busyness"` - Processed int `xml:"processed"` - Spawner_creation_time int64 `xml:"spawner_creation_time"` - Spawn_start_time int64 `xml:"spawn_start_time"` - Spawn_end_time int64 `xml:"spawn_end_time"` - Last_used int64 `xml:"last_used"` - Uptime string `xml:"uptime"` - Code_revision string `xml:"code_revision"` - Life_status string `xml:"life_status"` - Enabled string `xml:"enabled"` - Has_metrics bool `xml:"has_metrics"` - Cpu int64 `xml:"cpu"` - Rss int64 `xml:"rss"` - Pss int64 `xml:"pss"` - Private_dirty int64 `xml:"private_dirty"` - Swap int64 `xml:"swap"` - Real_memory int64 `xml:"real_memory"` - Vmsize int64 `xml:"vmsize"` - Process_group_id string `xml:"process_group_id"` + Pid int `xml:"pid"` + Concurrency int 
`xml:"concurrency"` + Sessions int `xml:"sessions"` + Busyness int `xml:"busyness"` + Processed int `xml:"processed"` + SpawnerCreationTime int64 `xml:"spawner_creation_time"` + SpawnStartTime int64 `xml:"spawn_start_time"` + SpawnEndTime int64 `xml:"spawn_end_time"` + LastUsed int64 `xml:"last_used"` + Uptime string `xml:"uptime"` + CodeRevision string `xml:"code_revision"` + LifeStatus string `xml:"life_status"` + Enabled string `xml:"enabled"` + HasMetrics bool `xml:"has_metrics"` + CPU int64 `xml:"cpu"` + Rss int64 `xml:"rss"` + Pss int64 `xml:"pss"` + PrivateDirty int64 `xml:"private_dirty"` + Swap int64 `xml:"swap"` + RealMemory int64 `xml:"real_memory"` + Vmsize int64 `xml:"vmsize"` + ProcessGroupID string `xml:"process_group_id"` } func (p *process) getUptime() int64 { @@ -137,31 +137,27 @@ var sampleConfig = ` command = "passenger-status -v --show=xml" ` -func (r *passenger) SampleConfig() string { +func (p *passenger) SampleConfig() string { return sampleConfig } -func (r *passenger) Description() string { +func (p *passenger) Description() string { return "Read metrics of passenger using passenger-status" } -func (g *passenger) Gather(acc telegraf.Accumulator) error { - if g.Command == "" { - g.Command = "passenger-status -v --show=xml" +func (p *passenger) Gather(acc telegraf.Accumulator) error { + if p.Command == "" { + p.Command = "passenger-status -v --show=xml" } - cmd, args := g.parseCommand() + cmd, args := p.parseCommand() out, err := exec.Command(cmd, args...).Output() if err != nil { return err } - if err = importMetric(out, acc); err != nil { - return err - } - - return nil + return importMetric(out, acc) } func importMetric(stat []byte, acc telegraf.Accumulator) error { @@ -170,17 +166,17 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { decoder := xml.NewDecoder(bytes.NewReader(stat)) decoder.CharsetReader = charset.NewReaderLabel if err := decoder.Decode(&p); err != nil { - return fmt.Errorf("Cannot parse input with error: %v\n", err) + return fmt.Errorf("cannot parse input with error: %v", err) } tags := map[string]string{ - "passenger_version": p.Passenger_version, + "passenger_version": p.PassengerVersion, } fields := map[string]interface{}{ - "process_count": p.Process_count, + "process_count": p.ProcessCount, "max": p.Max, - "capacity_used": p.Capacity_used, - "get_wait_list_size": p.Get_wait_list_size, + "capacity_used": p.CapacityUsed, + "get_wait_list_size": p.GetWaitListSize, } acc.AddFields("passenger", fields, tags) @@ -189,8 +185,8 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "name": sg.Name, } fields := map[string]interface{}{ - "get_wait_list_size": sg.Get_wait_list_size, - "capacity_used": sg.Capacity_used, + "get_wait_list_size": sg.GetWaitListSize, + "capacity_used": sg.CapacityUsed, } acc.AddFields("passenger_supergroup", fields, tags) @@ -201,9 +197,9 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "app_type": group.AppType, } fields := map[string]interface{}{ - "get_wait_list_size": group.Get_wait_list_size, - "capacity_used": group.Capacity_used, - "processes_being_spawned": group.Processes_being_spawned, + "get_wait_list_size": group.GetWaitListSize, + "capacity_used": group.CapacityUsed, + "processes_being_spawned": group.ProcessesBeingSpawned, } acc.AddFields("passenger_group", fields, tags) @@ -213,26 +209,26 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "app_root": group.AppRoot, "supergroup_name": sg.Name, "pid": fmt.Sprintf("%d", process.Pid), - 
"code_revision": process.Code_revision, - "life_status": process.Life_status, - "process_group_id": process.Process_group_id, + "code_revision": process.CodeRevision, + "life_status": process.LifeStatus, + "process_group_id": process.ProcessGroupID, } fields := map[string]interface{}{ "concurrency": process.Concurrency, "sessions": process.Sessions, "busyness": process.Busyness, "processed": process.Processed, - "spawner_creation_time": process.Spawner_creation_time, - "spawn_start_time": process.Spawn_start_time, - "spawn_end_time": process.Spawn_end_time, - "last_used": process.Last_used, + "spawner_creation_time": process.SpawnerCreationTime, + "spawn_start_time": process.SpawnStartTime, + "spawn_end_time": process.SpawnEndTime, + "last_used": process.LastUsed, "uptime": process.getUptime(), - "cpu": process.Cpu, + "cpu": process.CPU, "rss": process.Rss, "pss": process.Pss, - "private_dirty": process.Private_dirty, + "private_dirty": process.PrivateDirty, "swap": process.Swap, - "real_memory": process.Real_memory, + "real_memory": process.RealMemory, "vmsize": process.Vmsize, } acc.AddFields("passenger_process", fields, tags) diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index c54239d39ecfd..ecbeeb532fd1e 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -2,22 +2,42 @@ package passenger import ( "fmt" - "io/ioutil" "os" + "path/filepath" + "runtime" + "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) -func fakePassengerStatus(stat string) { - content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) - ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700) +func fakePassengerStatus(stat string) (string, error) { + var fileExtension, content string + if runtime.GOOS == "windows" { + fileExtension = ".bat" + content = "@echo off\n" + for _, line := range strings.Split(strings.TrimSuffix(stat, "\n"), "\n") { + content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" //my eyes are bleeding + } + } else { + content = fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) + } + + tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) + if err := os.WriteFile(tempFilePath, []byte(content), 0700); err != nil { + return "", err + } + + return tempFilePath, nil } -func teardown() { - os.Remove("/tmp/passenger-status") +func teardown(tempFilePath string) { + // Ignore the returned error as we want to remove the file and ignore missing file errors + //nolint:errcheck,revive + os.Remove(tempFilePath) } func Test_Invalid_Passenger_Status_Cli(t *testing.T) { @@ -29,51 +49,53 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`) + assert.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) } func Test_Invalid_Xml(t *testing.T) { - fakePassengerStatus("invalid xml") - defer teardown() + tempFilePath, err := fakePassengerStatus("invalid xml") + require.NoError(t, err) + defer teardown(tempFilePath) r := &passenger{ - Command: "/tmp/passenger-status", + Command: tempFilePath, } var acc testutil.Accumulator - err := r.Gather(&acc) + err = r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), "Cannot parse input with error: 
EOF\n") + assert.Equal(t, "cannot parse input with error: EOF", err.Error()) } // We test this by ensure that the error message match the path of default cli func Test_Default_Config_Load_Default_Command(t *testing.T) { - fakePassengerStatus("invalid xml") - defer teardown() + tempFilePath, err := fakePassengerStatus("invalid xml") + require.NoError(t, err) + defer teardown(tempFilePath) r := &passenger{} var acc testutil.Accumulator - err := r.Gather(&acc) + err = r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH") + assert.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") } func TestPassengerGenerateMetric(t *testing.T) { - fakePassengerStatus(sampleStat) - defer teardown() + tempFilePath, err := fakePassengerStatus(sampleStat) + require.NoError(t, err) + defer teardown(tempFilePath) //Now we tested again above server, with our authentication data r := &passenger{ - Command: "/tmp/passenger-status", + Command: tempFilePath, } var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "passenger_version": "5.0.17", diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 83215d8f62f3a..9d4e2ad47c1b8 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -7,6 +7,7 @@ The pf plugin retrieves this information by invoking the `pfstat` command. The ` * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. * Configure sudo to grant telegraf to run `pfctl` as root. This is the most restrictive option, but require sudo setup. +* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. 
### Using sudo diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 035c44fbe1404..429169d543ab5 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -164,7 +164,6 @@ func parseCounterTable(lines []string, fields map[string]interface{}) error { } func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*Entry) error { - for _, v := range lines { entries := regex.FindStringSubmatch(v) if entries != nil { diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 0b8c8c16acd02..fead359d2271f 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -5,10 +5,10 @@ import ( "strconv" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" - _ "github.com/jackc/pgx/stdlib" // register driver + _ "github.com/jackc/pgx/v4/stdlib" // register driver ) type PgBouncer struct { @@ -61,7 +61,7 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { } for rows.Next() { - tags, columnMap, err := p.accRow(rows, acc, columns) + tags, columnMap, err := p.accRow(rows, columns) if err != nil { return err @@ -111,7 +111,7 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { } for poolRows.Next() { - tags, columnMap, err := p.accRow(poolRows, acc, columns) + tags, columnMap, err := p.accRow(poolRows, columns) if err != nil { return err } @@ -145,7 +145,7 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []string) (map[string]string, +func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string, map[string]*interface{}, error) { var columnVars []interface{} var dbname bytes.Buffer @@ -170,9 +170,13 @@ func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []stri } if columnMap["database"] != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["database"]).(string)) + if _, err := dbname.WriteString((*columnMap["database"]).(string)); err != nil { + return nil, nil, err + } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return nil, nil, err + } } var tagAddress string @@ -189,11 +193,9 @@ func init() { inputs.Add("pgbouncer", func() telegraf.Input { return &PgBouncer{ Service: postgresql.Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: true, }, } diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go index 44e28c7f3335e..7dd75fb4ae487 100644 --- a/plugins/inputs/pgbouncer/pgbouncer_test.go +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -2,17 +2,16 @@ package pgbouncer import ( "fmt" + "testing" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) -func TestPgBouncerGeneratesMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { + t.Skip("Skipping test, connection refused") p := &PgBouncer{ Service: postgresql.Service{ @@ -28,15 +27,19 @@ func 
TestPgBouncerGeneratesMetrics(t *testing.T) { require.NoError(t, p.Start(&acc)) require.NoError(t, p.Gather(&acc)) - intMetrics := []string{ - "total_requests", + // Return value of pgBouncer + // [pgbouncer map[db:pgbouncer server:host=localhost user=pgbouncer dbname=pgbouncer port=6432 ] map[avg_query_count:0 avg_query_time:0 avg_wait_time:0 avg_xact_count:0 avg_xact_time:0 total_query_count:3 total_query_time:0 total_received:0 total_sent:0 total_wait_time:0 total_xact_count:3 total_xact_time:0] 1620163750039747891 pgbouncer_pools map[db:pgbouncer pool_mode:statement server:host=localhost user=pgbouncer dbname=pgbouncer port=6432 user:pgbouncer] map[cl_active:1 cl_waiting:0 maxwait:0 maxwait_us:0 sv_active:0 sv_idle:0 sv_login:0 sv_tested:0 sv_used:0] 1620163750041444466] + + intMetricsPgBouncer := []string{ "total_received", "total_sent", "total_query_time", - "avg_req", - "avg_recv", - "avg_sent", - "avg_query", + "avg_query_count", + "avg_query_time", + "avg_wait_time", + } + + intMetricsPgBouncerPools := []string{ "cl_active", "cl_waiting", "sv_active", @@ -51,16 +54,21 @@ func TestPgBouncerGeneratesMetrics(t *testing.T) { metricsCounted := 0 - for _, metric := range intMetrics { + for _, metric := range intMetricsPgBouncer { assert.True(t, acc.HasInt64Field("pgbouncer", metric)) metricsCounted++ } + for _, metric := range intMetricsPgBouncerPools { + assert.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) + metricsCounted++ + } + for _, metric := range int32Metrics { assert.True(t, acc.HasInt32Field("pgbouncer", metric)) metricsCounted++ } assert.True(t, metricsCounted > 0) - assert.Equal(t, len(intMetrics)+len(int32Metrics), metricsCounted) + assert.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) } diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 2ebdf2ffbca35..b6a6f956d3bf0 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/cgi" @@ -24,16 +23,16 @@ import ( // it's converted to an http.Request. type request struct { pw *io.PipeWriter - reqId uint16 + reqID uint16 params map[string]string buf [1024]byte rawParams []byte keepConn bool } -func newRequest(reqId uint16, flags uint8) *request { +func newRequest(reqID uint16, flags uint8) *request { r := &request{ - reqId: reqId, + reqID: reqID, params: map[string]string{}, keepConn: flags&flagKeepConn != 0, } @@ -79,7 +78,7 @@ func newResponse(c *child, req *request) *response { return &response{ req: req, header: http.Header{}, - w: newWriter(c.conn, typeStdout, req.reqId), + w: newWriter(c.conn, typeStdout, req.reqID), } } @@ -161,7 +160,7 @@ func (c *child) serve() { var errCloseConn = errors.New("fcgi: connection should be closed") -var emptyBody = ioutil.NopCloser(strings.NewReader("")) +var emptyBody = io.NopCloser(strings.NewReader("")) // ErrRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. @@ -173,7 +172,7 @@ var ErrConnClosed = errors.New("fcgi: connection to web server closed") func (c *child) handleRecord(rec *record) error { c.mu.Lock() - req, ok := c.requests[rec.h.Id] + req, ok := c.requests[rec.h.ID] c.mu.Unlock() if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { // The spec says to ignore unknown request IDs. 
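The `c.mu` lock around `c.requests` in the hunk above is the usual guarded-map pattern: the connection's read loop and the per-request goroutines both touch the table, so each access is kept to a short critical section. A stripped-down sketch of the pattern, with illustrative names rather than the plugin's actual types:

```go
package main

import (
	"fmt"
	"sync"
)

// requestTable reduces the FastCGI child's request map to its essentials:
// shared state guarded by a mutex, locked only for the map access itself.
type requestTable struct {
	mu   sync.Mutex
	reqs map[uint16]string
}

func newRequestTable() *requestTable {
	return &requestTable{reqs: make(map[uint16]string)}
}

func (t *requestTable) lookup(id uint16) (string, bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	r, ok := t.reqs[id]
	return r, ok
}

func (t *requestTable) store(id uint16, state string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.reqs[id] = state
}

func main() {
	table := newRequestTable()
	table.store(1, "begin")
	if state, ok := table.lookup(1); ok {
		fmt.Println(state) // begin
	}
}
```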
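A recurring change in this patch is the move from `internal.Duration` (a struct wrapping a `time.Duration` field) to `config.Duration` (a named type), as in the pgbouncer `init` above and the phpfpm client below: defaults become plain casts like `config.Duration(0)`, and conversion back is `time.Duration(...)`. A standalone sketch of the named-type pattern (this `Duration` is a stand-in mimicking the idea, not the actual telegraf/config implementation):

```go
package main

import (
	"fmt"
	"time"
)

// Duration is a named type rather than a wrapper struct, so no field access
// is needed: construction and conversion are both plain type casts.
type Duration time.Duration

// UnmarshalText lets config decoders fill the value from strings like "5s".
func (d *Duration) UnmarshalText(text []byte) error {
	parsed, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	*d = Duration(parsed)
	return nil
}

func main() {
	var timeout Duration
	if err := timeout.UnmarshalText([]byte("5s")); err != nil {
		panic(err)
	}
	// Converting back for stdlib use, e.g. http.Client{Timeout: time.Duration(timeout)}.
	fmt.Println(time.Duration(timeout)) // 5s
}
```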
@@ -193,12 +192,11 @@ func (c *child) handleRecord(rec *record) error { return err } if br.role != roleResponder { - c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) - return nil + return c.conn.writeEndRequest(rec.h.ID, 0, statusUnknownRole) } - req = newRequest(rec.h.Id, br.flags) + req = newRequest(rec.h.ID, br.flags) c.mu.Lock() - c.requests[rec.h.Id] = req + c.requests[rec.h.ID] = req c.mu.Unlock() return nil case typeParams: @@ -226,25 +224,32 @@ func (c *child) handleRecord(rec *record) error { if len(content) > 0 { // TODO(eds): This blocks until the handler reads from the pipe. // If the handler takes a long time, it might be a problem. - req.pw.Write(content) + if _, err := req.pw.Write(content); err != nil { + return err + } } else if req.pw != nil { - req.pw.Close() + if err := req.pw.Close(); err != nil { + return err + } } return nil case typeGetValues: values := map[string]string{"FCGI_MPXS_CONNS": "1"} - c.conn.writePairs(typeGetValuesResult, 0, values) - return nil + return c.conn.writePairs(typeGetValuesResult, 0, values) case typeData: // If the filter role is implemented, read the data stream here. return nil case typeAbortRequest: c.mu.Lock() - delete(c.requests, rec.h.Id) + delete(c.requests, rec.h.ID) c.mu.Unlock() - c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) + if err := c.conn.writeEndRequest(rec.h.ID, 0, statusRequestComplete); err != nil { + return err + } if req.pw != nil { - req.pw.CloseWithError(ErrRequestAborted) + if err := req.pw.CloseWithError(ErrRequestAborted); err != nil { + return err + } } if !req.keepConn { // connection will close upon return @@ -254,8 +259,7 @@ func (c *child) handleRecord(rec *record) error { default: b := make([]byte, 8) b[0] = byte(rec.h.Type) - c.conn.writeRecord(typeUnknownType, 0, b) - return nil + return c.conn.writeRecord(typeUnknownType, 0, b) } } @@ -265,16 +269,22 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { if err != nil { // there was an error reading the request r.WriteHeader(http.StatusInternalServerError) - c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) + if err := c.conn.writeRecord(typeStderr, req.reqID, []byte(err.Error())); err != nil { + return + } } else { httpReq.Body = body c.handler.ServeHTTP(r, httpReq) } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive r.Close() c.mu.Lock() - delete(c.requests, req.reqId) + delete(c.requests, req.reqID) c.mu.Unlock() - c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) + if err := c.conn.writeEndRequest(req.reqID, 0, statusRequestComplete); err != nil { + return + } // Consume the entire body, so the host isn't still writing to // us when we close the socket below in the !keepConn case, @@ -283,10 +293,14 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // some sort of abort request to the host, so the host // can properly cut off the client sending all the data. 
// For now just bound it a little and - io.CopyN(ioutil.Discard, body, 100<<20) + //nolint:errcheck,revive + io.CopyN(io.Discard, body, 100<<20) + //nolint:errcheck,revive body.Close() if !req.keepConn { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.conn.Close() } } @@ -298,6 +312,8 @@ func (c *child) cleanUp() { if req.pw != nil { // race with call to Close in c.serveRequest doesn't matter because // Pipe(Reader|Writer).Close are idempotent + // Ignore the returned error as we continue in the loop anyway + //nolint:errcheck,revive req.pw.CloseWithError(ErrConnClosed) } } diff --git a/plugins/inputs/phpfpm/fcgi.go b/plugins/inputs/phpfpm/fcgi.go index 689660ea093c3..45248329efda6 100644 --- a/plugins/inputs/phpfpm/fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package fcgi implements the FastCGI protocol. +// Package phpfpm implements the FastCGI protocol. // Currently only the responder role is supported. // The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 package phpfpm @@ -45,12 +45,14 @@ const ( maxPad = 255 ) +//nolint:varcheck // For having proper order const ( roleResponder = iota + 1 // only Responders are implemented. roleAuthorizer roleFilter ) +//nolint:varcheck // For having proper order const ( statusRequestComplete = iota statusCantMultiplex @@ -58,12 +60,10 @@ const ( statusUnknownRole ) -const headerLen = 8 - type header struct { Version uint8 Type recType - Id uint16 + ID uint16 ContentLength uint16 PaddingLength uint8 Reserved uint8 @@ -72,7 +72,7 @@ type header struct { type beginRequest struct { role uint16 flags uint8 - reserved [5]uint8 + reserved [5]uint8 //nolint:unused // Memory reservation } func (br *beginRequest) read(content []byte) error { @@ -88,10 +88,10 @@ func (br *beginRequest) read(content []byte) error { // not synchronized because we don't care what the contents are var pad [maxPad]byte -func (h *header) init(recType recType, reqId uint16, contentLength int) { +func (h *header) init(recType recType, reqID uint16, contentLength int) { h.Version = 1 h.Type = recType - h.Id = reqId + h.ID = reqID h.ContentLength = uint16(contentLength) h.PaddingLength = uint8(-contentLength & 7) } @@ -135,16 +135,16 @@ func (rec *record) read(r io.Reader) (err error) { return nil } -func (r *record) content() []byte { - return r.buf[:r.h.ContentLength] +func (rec *record) content() []byte { + return rec.buf[:rec.h.ContentLength] } // writeRecord writes and sends a single record. 
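// For context: each FastCGI record is an 8-byte header (version, record
// type, request ID, content length, padding length, and a reserved byte)
// followed by the content and up to seven padding bytes, as computed by
// header.init above. writeRecord serializes the whole frame under c.mutex
// so concurrent streams cannot interleave partial records.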
-func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { +func (c *conn) writeRecord(recType recType, reqID uint16, b []byte) error { c.mutex.Lock() defer c.mutex.Unlock() c.buf.Reset() - c.h.init(recType, reqId, len(b)) + c.h.init(recType, reqID, len(b)) if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { return err } @@ -158,20 +158,20 @@ func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { return err } -func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { +func (c *conn) writeBeginRequest(reqID uint16, role uint16, flags uint8) error { b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(typeBeginRequest, reqId, b[:]) + return c.writeRecord(typeBeginRequest, reqID, b[:]) } -func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { +func (c *conn) writeEndRequest(reqID uint16, appStatus int, protocolStatus uint8) error { b := make([]byte, 8) binary.BigEndian.PutUint32(b, uint32(appStatus)) b[4] = protocolStatus - return c.writeRecord(typeEndRequest, reqId, b) + return c.writeRecord(typeEndRequest, reqID, b) } -func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { - w := newWriter(c, recType, reqId) +func (c *conn) writePairs(recType recType, reqID uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqID) b := make([]byte, 8) for k, v := range pairs { n := encodeSize(b, uint32(len(k))) @@ -186,8 +186,7 @@ func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string return err } } - w.Close() - return nil + return w.Close() } func readSize(s []byte) (uint32, int) { @@ -232,14 +231,16 @@ type bufWriter struct { func (w *bufWriter) Close() error { if err := w.Writer.Flush(); err != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive w.closer.Close() return err } return w.closer.Close() } -func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { - s := &streamWriter{c: c, recType: recType, reqId: reqId} +func newWriter(c *conn, recType recType, reqID uint16) *bufWriter { + s := &streamWriter{c: c, recType: recType, reqID: reqID} w := bufio.NewWriterSize(s, maxWrite) return &bufWriter{s, w} } @@ -249,7 +250,7 @@ func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { type streamWriter struct { c *conn recType recType - reqId uint16 + reqID uint16 } func (w *streamWriter) Write(p []byte) (int, error) { @@ -259,7 +260,7 @@ func (w *streamWriter) Write(p []byte) (int, error) { if n > maxWrite { n = maxWrite } - if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { + if err := w.c.writeRecord(w.recType, w.reqID, p[:n]); err != nil { return nn, err } nn += n @@ -270,5 +271,5 @@ func (w *streamWriter) Write(p []byte) (int, error) { func (w *streamWriter) Close() error { // send empty record to close the stream - return w.c.writeRecord(w.recType, w.reqId, nil) + return w.c.writeRecord(w.recType, w.reqID, nil) } diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 9b42d91bd961a..56fb38188fb75 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -24,7 +24,7 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { laddr := net.UnixAddr{Name: args[0].(string), Net: h} con, err = net.DialUnix(h, nil, &laddr) default: - err = errors.New("fcgi: we only accept int (port) or string (socket) params.") + err = 
errors.New("fcgi: we only accept int (port) or string (socket) params") } fcgi := &conn{ rwc: con, @@ -33,25 +33,25 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { return fcgi, err } -func (client *conn) Request( +func (c *conn) Request( env map[string]string, requestData string, ) (retout []byte, reterr []byte, err error) { - defer client.rwc.Close() - var reqId uint16 = 1 + defer c.rwc.Close() + var reqID uint16 = 1 - err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) + err = c.writeBeginRequest(reqID, uint16(roleResponder), 0) if err != nil { return } - err = client.writePairs(typeParams, reqId, env) + err = c.writePairs(typeParams, reqID, env) if err != nil { return } if len(requestData) > 0 { - if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { + if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil { return } } @@ -62,7 +62,7 @@ func (client *conn) Request( // receive until EOF or FCGI_END_REQUEST READ_LOOP: for { - err1 = rec.read(client.rwc) + err1 = rec.read(c.rwc) if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { if err1 != io.EOF { err = err1 diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index 15e0030a77151..7211c0c3971e1 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -8,11 +8,12 @@ import ( "bytes" "errors" "io" - "io/ioutil" "net/http" "testing" ) +const requestID uint16 = 1 + var sizeTests = []struct { size uint32 bytes []byte @@ -44,7 +45,7 @@ func TestSize(t *testing.T) { var streamTests = []struct { desc string recType recType - reqId uint16 + reqID uint16 content []byte raw []byte }{ @@ -90,8 +91,8 @@ outer: t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) continue } - if rec.h.Id != test.reqId { - t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) + if rec.h.ID != test.reqID { + t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.ID, test.reqID) continue } if !bytes.Equal(content, test.content) { @@ -100,7 +101,7 @@ outer: } buf.Reset() c := newConn(&nilCloser{buf}) - w := newWriter(c, test.recType, test.reqId) + w := newWriter(c, test.recType, test.reqID) if _, err := w.Write(test.content); err != nil { t.Errorf("%s: error writing record: %v", test.desc, err) continue @@ -124,7 +125,7 @@ func (c *writeOnlyConn) Write(p []byte) (int, error) { return len(p), nil } -func (c *writeOnlyConn) Read(p []byte) (int, error) { +func (c *writeOnlyConn) Read(_ []byte) (int, error) { return 0, errors.New("conn is write-only") } @@ -164,17 +165,16 @@ func nameValuePair11(nameData, valueData string) []byte { func makeRecord( recordType recType, - requestId uint16, contentData []byte, ) []byte { - requestIdB1 := byte(requestId >> 8) - requestIdB0 := byte(requestId) + requestIDB1 := byte(requestID >> 8) + requestIDB0 := byte(requestID) contentLength := len(contentData) contentLengthB1 := byte(contentLength >> 8) contentLengthB0 := byte(contentLength) return bytes.Join([][]byte{ - {1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1, + {1, byte(recordType), requestIDB1, requestIDB0, contentLengthB1, contentLengthB0, 0, 0}, contentData, }, @@ -185,14 +185,13 @@ func makeRecord( // request body var streamBeginTypeStdin = bytes.Join([][]byte{ // set up request 1 - makeRecord(typeBeginRequest, 1, - []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}), + makeRecord(typeBeginRequest, []byte{0, byte(roleResponder), 0, 
0, 0, 0, 0, 0}), // add required parameters to request 1 - makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")), - makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), - makeRecord(typeParams, 1, nil), + makeRecord(typeParams, nameValuePair11("REQUEST_METHOD", "GET")), + makeRecord(typeParams, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), + makeRecord(typeParams, nil), // begin sending body of request 1 - makeRecord(typeStdin, 1, []byte("0123456789abcdef")), + makeRecord(typeStdin, []byte("0123456789abcdef")), }, nil) @@ -204,7 +203,7 @@ var cleanUpTests = []struct { { bytes.Join([][]byte{ streamBeginTypeStdin, - makeRecord(typeAbortRequest, 1, nil), + makeRecord(typeAbortRequest, nil), }, nil), ErrRequestAborted, @@ -242,7 +241,7 @@ func TestChildServeCleansUp(t *testing.T) { r *http.Request, ) { // block on reading body of request - _, err := io.Copy(ioutil.Discard, r.Body) + _, err := io.Copy(io.Discard, r.Body) if err != tt.err { t.Errorf("Expected %#v, got %#v", tt.err, err) } @@ -265,7 +264,7 @@ func (rwNopCloser) Close() error { } // Verifies it doesn't crash. Issue 11824. -func TestMalformedParams(t *testing.T) { +func TestMalformedParams(_ *testing.T) { input := []byte{ // beginRequest, requestId=1, contentLength=8, role=1, keepConn=1 1, 1, 0, 1, 0, 8, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, @@ -274,7 +273,7 @@ func TestMalformedParams(t *testing.T) { // end of params 1, 4, 0, 1, 0, 0, 0, 0, } - rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + rw := rwNopCloser{bytes.NewReader(input), io.Discard} c := newChild(rw, http.DefaultServeMux) c.serve() } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index f191844a34d56..77c4bf0aeee56 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -7,32 +7,32 @@ import ( "io" "net/http" "net/url" - "os" "strconv" "strings" "sync" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) const ( - PF_POOL = "pool" - PF_PROCESS_MANAGER = "process manager" - PF_START_SINCE = "start since" - PF_ACCEPTED_CONN = "accepted conn" - PF_LISTEN_QUEUE = "listen queue" - PF_MAX_LISTEN_QUEUE = "max listen queue" - PF_LISTEN_QUEUE_LEN = "listen queue len" - PF_IDLE_PROCESSES = "idle processes" - PF_ACTIVE_PROCESSES = "active processes" - PF_TOTAL_PROCESSES = "total processes" - PF_MAX_ACTIVE_PROCESSES = "max active processes" - PF_MAX_CHILDREN_REACHED = "max children reached" - PF_SLOW_REQUESTS = "slow requests" + PfPool = "pool" + PfProcessManager = "process manager" + PfStartSince = "start since" + PfAcceptedConn = "accepted conn" + PfListenQueue = "listen queue" + PfMaxListenQueue = "max listen queue" + PfListenQueueLen = "listen queue len" + PfIdleProcesses = "idle processes" + PfActiveProcesses = "active processes" + PfTotalProcesses = "total processes" + PfMaxActiveProcesses = "max active processes" + PfMaxChildrenReached = "max children reached" + PfSlowRequests = "slow requests" ) type metric map[string]int64 @@ -40,7 +40,7 @@ type poolStat map[string]metric type phpfpm struct { Urls []string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -97,7 +97,7 @@ func (p *phpfpm) Init() error { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: p.Timeout.Duration, + Timeout: 
time.Duration(p.Timeout), } return nil } @@ -132,7 +132,7 @@ func (p *phpfpm) Gather(acc telegraf.Accumulator) error { // Request status page to get stat raw data and import it func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { - return p.gatherHttp(addr, acc) + return p.gatherHTTP(addr, acc) } var ( @@ -145,12 +145,12 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { u, err := url.Parse(addr) if err != nil { - return fmt.Errorf("Unable parse server address '%s': %s", addr, err) + return fmt.Errorf("unable parse server address '%s': %s", addr, err) } socketAddr := strings.Split(u.Host, ":") - fcgiIp := socketAddr[0] + fcgiIP := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) - fcgi, err = newFcgiClient(fcgiIp, fcgiPort) + fcgi, err = newFcgiClient(fcgiIP, fcgiPort) if err != nil { return err } @@ -189,19 +189,18 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula if len(fpmErr) == 0 && err == nil { importMetric(bytes.NewReader(fpmOutput), acc, addr) return nil - } else { - return fmt.Errorf("Unable parse phpfpm status. Error: %v %v", string(fpmErr), err) } + return fmt.Errorf("unable parse phpfpm status, error: %v %v", string(fpmErr), err) } // Gather stat using http protocol -func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error { +func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { return fmt.Errorf("unable parse server address '%s': %v", addr, err) } - req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, u.Path), nil) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return fmt.Errorf("unable to create new request '%s': %v", addr, err) } @@ -221,7 +220,7 @@ func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error { } // Import stat data into Telegraf system -func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { +func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { stats := make(poolStat) var currentPool string @@ -235,7 +234,7 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { } fieldName := strings.Trim(keyvalue[0], " ") // We start to gather data for a new pool here - if fieldName == PF_POOL { + if fieldName == PfPool { currentPool = strings.Trim(keyvalue[1], " ") stats[currentPool] = make(metric) continue @@ -243,17 +242,17 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { // Start to parse metric for current pool switch fieldName { - case PF_START_SINCE, - PF_ACCEPTED_CONN, - PF_LISTEN_QUEUE, - PF_MAX_LISTEN_QUEUE, - PF_LISTEN_QUEUE_LEN, - PF_IDLE_PROCESSES, - PF_ACTIVE_PROCESSES, - PF_TOTAL_PROCESSES, - PF_MAX_ACTIVE_PROCESSES, - PF_MAX_CHILDREN_REACHED, - PF_SLOW_REQUESTS: + case PfStartSince, + PfAcceptedConn, + PfListenQueue, + PfMaxListenQueue, + PfListenQueueLen, + PfIdleProcesses, + PfActiveProcesses, + PfTotalProcesses, + PfMaxActiveProcesses, + PfMaxChildrenReached, + PfSlowRequests: fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) if err == nil { stats[currentPool][fieldName] = fieldValue @@ -273,8 +272,6 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { } acc.AddFields("phpfpm", fields, tags) } - - return stats 
} func expandUrls(urls []string) ([]string, error) { @@ -301,25 +298,18 @@ func globUnixSocket(url string) ([]string, error) { } paths := glob.Match() if len(paths) == 0 { - if _, err := os.Stat(paths[0]); err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Socket doesn't exist '%s': %s", pattern, err) - } - return nil, err - } - return nil, nil + return nil, fmt.Errorf("socket doesn't exist %q", pattern) } - addrs := make([]string, 0, len(paths)) - + addresses := make([]string, 0, len(paths)) for _, path := range paths { if status != "" { path = path + ":" + status } - addrs = append(addrs, path) + addresses = append(addresses, path) } - return addrs, nil + return addresses, nil } func unixSocketPaths(addr string) (string, string) { diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index f3b72a8281b7e..d51c576aad7f0 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package phpfpm import ( @@ -18,32 +24,38 @@ import ( type statServer struct{} // We create a fake server to return test data -func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive fmt.Fprint(w, outputSample) } func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { - sv := statServer{} - ts := httptest.NewServer(sv) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "ok", r.URL.Query().Get("test")) + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + _, err := fmt.Fprint(w, outputSample) + require.NoError(t, err) + })) defer ts.Close() + url := ts.URL + "?test=ok" r := &phpfpm{ - Urls: []string{ts.URL}, + Urls: []string{url}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", - "url": ts.URL, + "url": url, } fields := map[string]interface{}{ @@ -66,12 +78,11 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { // Let OS find an available port tcp, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("Cannot initialize test server") - } + require.NoError(t, err, "Cannot initialize test server") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) //Now we tested again above server @@ -79,12 +90,10 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -113,27 +122,24 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { // removing of socket fail when system restart /tmp is clear so // we 
don't have junk files around var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) r := &phpfpm{ Urls: []string{tcp.Addr().String()}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -162,40 +168,35 @@ func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { // removing of socket fail when system restart /tmp is clear so // we don't have junk files around var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) socket1 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) tcp1, err := net.Listen("unix", socket1) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp1.Close() - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) socket2 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) tcp2, err := net.Listen("unix", socket2) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp2.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp1, s) + //nolint:errcheck,revive go fcgi.Serve(tcp2, s) r := &phpfpm{ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc1, acc2 testutil.Accumulator - err = acc1.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc1.GatherError(r.Gather)) - err = acc2.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc2.GatherError(r.Gather)) tags1 := map[string]string{ "pool": "www", @@ -230,27 +231,24 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // removing of socket fail we won't have junk files around. 
Cuz when system // restart, it clears out /tmp var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) r := &phpfpm{ Urls: []string{tcp.Addr().String() + ":custom-status-path"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -277,16 +275,15 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { //When not passing server config, we default to localhost //We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{} + r := &phpfpm{Urls: []string{"http://bad.localhost:62001/status"}} - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1/status") + assert.Contains(t, err.Error(), "/status") } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { @@ -294,12 +291,11 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t Urls: []string{"http://aninvalidone"}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) assert.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) assert.Contains(t, err.Error(), `lookup aninvalidone`) @@ -310,15 +306,13 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi Urls: []string{"/tmp/invalid.sock"}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `dial unix /tmp/invalid.sock: connect: no such file or directory`, err.Error()) - + assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) } const outputSample = ` diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 91af1b2ae33ed..10744a9b15e99 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -57,6 +57,9 @@ native Go by the Telegraf process, eliminating the need to execute the system ## option of the ping command. # interface = "" + ## Percentiles to calculate. This only works with the native method. + # percentiles = [50, 95, 99] + ## Specify the ping executable binary. # binary = "ping" @@ -67,6 +70,10 @@ native Go by the Telegraf process, eliminating the need to execute the system ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false + + ## Number of data bytes to be sent. Corresponds to the "-s" + ## option of the ping command. This only works with the native method. 
+  # size = 56
 ```

 #### File Limit
@@ -93,13 +100,13 @@ LimitNOFILE=8192

 Restart Telegraf:

 ```sh
-$ systemctl edit telegraf
+$ systemctl restart telegraf
 ```

 #### Linux Permissions

 When using `method = "native"`, Telegraf will attempt to use privileged raw
-ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities.
+ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities or for Telegraf to be run as root.

 With systemd:

 ```sh
@@ -124,18 +131,9 @@ setting capabilities.

 [man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html

-When Telegraf cannot listen on a privileged ICMP socket it will attempt to use
-ICMP echo sockets. If you wish to use this method you must ensure Telegraf's
-group, usually `telegraf`, is allowed to use ICMP echo sockets:
-
-```sh
-$ sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH"
-```
-
-Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo
-sockets and the `ping_group_range` setting.
+#### Other OS Permissions

-[man 7 icmp]: http://man7.org/linux/man-pages/man7/icmp.7.html
+When using `method = "native"`, you will need permissions similar to the executable ping program for your OS.

 ### Metrics

@@ -147,10 +145,11 @@ sockets and the `ping_group_range` setting.
   - packets_received (integer)
   - percent_packet_loss (float)
   - ttl (integer, Not available on Windows)
-  - average_response_ms (integer)
-  - minimum_response_ms (integer)
-  - maximum_response_ms (integer)
-  - standard_deviation_ms (integer, Available on Windows only with native ping)
+  - average_response_ms (float)
+  - minimum_response_ms (float)
+  - maximum_response_ms (float)
+  - standard_deviation_ms (float, Available on Windows only with method = "native")
+  - percentile\<N\>_ms (float, Where `<N>` is the percentile specified in `percentiles`. Available with method = "native" only)
   - errors (float, Windows only)
   - reply_received (integer, Windows with method = "exec" only)
   - percent_reply_loss (float, Windows with method = "exec" only)
diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index 008cfceacc5b9..7d3b05178ad0b 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -1,35 +1,44 @@
 package ping

 import (
-	"context"
 	"errors"
-	"log"
+	"fmt"
 	"math"
 	"net"
 	"os/exec"
 	"runtime"
+	"sort"
 	"strings"
 	"sync"
 	"time"

-	"github.com/glinton/ping"
+	"github.com/go-ping/ping"

 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )

+const (
+	defaultPingDataBytesSize = 56
+)
+
 // HostPinger is a function that runs the "ping" function using a list of
 // passed arguments.
This can be easily switched with a mocked ping function // for unit test purposes (see ping_test.go) type HostPinger func(binary string, timeout float64, args ...string) (string, error) -type HostResolver func(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) - -type IsCorrectNetwork func(ip net.IPAddr) bool - type Ping struct { + // wg is used to wait for ping with multiple URLs wg sync.WaitGroup + // Pre-calculated interval and timeout + calcInterval time.Duration + calcTimeout time.Duration + + sourceAddress string + + Log telegraf.Logger `toml:"-"` + // Interval at which to ping (ping -i ) PingInterval float64 `toml:"ping_interval"` @@ -64,11 +73,13 @@ type Ping struct { // host ping function pingHost HostPinger - // resolve host function - resolveHost HostResolver + nativePingFunc NativePingFunc + + // Calculate the given percentiles when using native method + Percentiles []int - // listenAddr is the address associated with the interface defined. - listenAddr string + // Packet size + Size *int } func (*Ping) Description() string { @@ -108,6 +119,9 @@ const sampleConfig = ` ## option of the ping command. # interface = "" + ## Percentiles to calculate. This only works with the native method. + # percentiles = [50, 95, 99] + ## Specify the ping executable binary. # binary = "ping" @@ -118,6 +132,10 @@ const sampleConfig = ` ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false + + ## Number of data bytes to be sent. Corresponds to the "-s" + ## option of the ping command. This only works with the native method. + # size = 56 ` func (*Ping) SampleConfig() string { @@ -125,10 +143,6 @@ func (*Ping) SampleConfig() string { } func (p *Ping) Gather(acc telegraf.Accumulator) error { - if p.Interface != "" && p.listenAddr == "" { - p.listenAddr = getAddr(p.Interface) - } - for _, host := range p.Urls { p.wg.Add(1) go func(host string) { @@ -148,268 +162,157 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { return nil } -func getAddr(iface string) string { - if addr := net.ParseIP(iface); addr != nil { - return addr.String() - } +type pingStats struct { + ping.Statistics + ttl int +} - ifaces, err := net.Interfaces() - if err != nil { - return "" - } +type NativePingFunc func(destination string) (*pingStats, error) - var ip net.IP - for i := range ifaces { - if ifaces[i].Name == iface { - addrs, err := ifaces[i].Addrs() - if err != nil { - return "" - } - if len(addrs) > 0 { - switch v := addrs[0].(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - if len(ip) == 0 { - return "" - } - return ip.String() - } - } +func (p *Ping) nativePing(destination string) (*pingStats, error) { + ps := &pingStats{} + + pinger, err := ping.NewPinger(destination) + if err != nil { + return nil, fmt.Errorf("failed to create new pinger: %w", err) } - return "" -} + pinger.SetPrivileged(true) -func hostPinger(binary string, timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath(binary) - if err != nil { - return "", err + if p.IPv6 { + pinger.SetNetwork("ip6") } - c := exec.Command(bin, args...) 
- out, err := internal.CombinedOutputTimeout(c, - time.Second*time.Duration(timeout+5)) - return string(out), err -} -func filterIPs(addrs []net.IPAddr, filterFunc IsCorrectNetwork) []net.IPAddr { - n := 0 - for _, x := range addrs { - if filterFunc(x) { - addrs[n] = x - n++ + if p.Method == "native" { + pinger.Size = defaultPingDataBytesSize + if p.Size != nil { + pinger.Size = *p.Size } } - return addrs[:n] -} -func hostResolver(ctx context.Context, ipv6 bool, destination string) (*net.IPAddr, error) { - resolver := &net.Resolver{} - ips, err := resolver.LookupIPAddr(ctx, destination) + pinger.Source = p.sourceAddress + pinger.Interval = p.calcInterval - if err != nil { - return nil, err + if p.Deadline > 0 { + pinger.Timeout = time.Duration(p.Deadline) * time.Second } - if ipv6 { - ips = filterIPs(ips, isV6) - } else { - ips = filterIPs(ips, isV4) + // Get Time to live (TTL) of first response, matching original implementation + once := &sync.Once{} + pinger.OnRecv = func(pkt *ping.Packet) { + once.Do(func() { + ps.ttl = pkt.Ttl + }) } - if len(ips) == 0 { - return nil, errors.New("Cannot resolve ip address") + pinger.Count = p.Count + err = pinger.Run() + if err != nil { + if strings.Contains(err.Error(), "operation not permitted") { + if runtime.GOOS == "linux" { + return nil, fmt.Errorf("permission changes required, enable CAP_NET_RAW capabilities (refer to the ping plugin's README.md for more info)") + } + + return nil, fmt.Errorf("permission changes required, refer to the ping plugin's README.md for more info") + } + return nil, fmt.Errorf("%w", err) } - return &ips[0], err -} -func isV4(ip net.IPAddr) bool { - return ip.IP.To4() != nil -} + ps.Statistics = *pinger.Statistics() -func isV6(ip net.IPAddr) bool { - return !isV4(ip) + return ps, nil } func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { - ctx := context.Background() - interval := p.PingInterval - if interval < 0.2 { - interval = 0.2 - } + tags := map[string]string{"url": destination} + fields := map[string]interface{}{} - timeout := p.Timeout - if timeout == 0 { - timeout = 5 + stats, err := p.nativePingFunc(destination) + if err != nil { + p.Log.Errorf("ping failed: %s", err.Error()) + if strings.Contains(err.Error(), "unknown") { + fields["result_code"] = 1 + } else { + fields["result_code"] = 2 + } + acc.AddFields("ping", fields, tags) + return } - tick := time.NewTicker(time.Duration(interval * float64(time.Second))) - defer tick.Stop() - - if p.Deadline > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(p.Deadline)*time.Second) - defer cancel() + fields = map[string]interface{}{ + "result_code": 0, + "packets_transmitted": stats.PacketsSent, + "packets_received": stats.PacketsRecv, } - host, err := p.resolveHost(ctx, p.IPv6, destination) - if err != nil { - acc.AddFields( - "ping", - map[string]interface{}{"result_code": 1}, - map[string]string{"url": destination}, - ) - acc.AddError(err) + if stats.PacketsSent == 0 { + p.Log.Debug("no packets sent") + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) return } - resps := make(chan *ping.Response) - rsps := []*ping.Response{} - - r := &sync.WaitGroup{} - r.Add(1) - go func() { - for res := range resps { - rsps = append(rsps, res) - } - r.Done() - }() - - wg := &sync.WaitGroup{} - c := ping.Client{} - - var doErr error - var packetsSent int - - type sentReq struct { - err error - sent bool + if stats.PacketsRecv == 0 { + p.Log.Debug("no packets received") + fields["result_code"] = 1 + 
fields["percent_packet_loss"] = float64(100) + acc.AddFields("ping", fields, tags) + return } - sents := make(chan sentReq) - r.Add(1) - go func() { - for sent := range sents { - if sent.err != nil { - doErr = sent.err - } - if sent.sent { - packetsSent++ - } - } - r.Done() - }() - - for i := 0; i < p.Count; i++ { - select { - case <-ctx.Done(): - goto finish - case <-tick.C: - ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout*float64(time.Second))) - defer cancel() - - wg.Add(1) - go func(seq int) { - defer wg.Done() - resp, err := c.Do(ctx, &ping.Request{ - Dst: net.ParseIP(host.String()), - Src: net.ParseIP(p.listenAddr), - Seq: seq, - }) - - sent := sentReq{err: err, sent: true} - if err != nil { - if strings.Contains(err.Error(), "not permitted") { - sent.sent = false - } - sents <- sent - return - } - - resps <- resp - sents <- sent - }(i + 1) - } + sort.Sort(durationSlice(stats.Rtts)) + for _, perc := range p.Percentiles { + var value = percentile(durationSlice(stats.Rtts), perc) + var field = fmt.Sprintf("percentile%v_ms", perc) + fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) } -finish: - wg.Wait() - close(resps) - close(sents) - - r.Wait() - - if doErr != nil && strings.Contains(doErr.Error(), "not permitted") { - log.Printf("D! [inputs.ping] %s", doErr.Error()) + // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + fields["ttl"] = stats.ttl } - tags, fields := onFin(packetsSent, rsps, doErr, destination) + fields["percent_packet_loss"] = float64(stats.PacketLoss) + fields["minimum_response_ms"] = float64(stats.MinRtt) / float64(time.Millisecond) + fields["average_response_ms"] = float64(stats.AvgRtt) / float64(time.Millisecond) + fields["maximum_response_ms"] = float64(stats.MaxRtt) / float64(time.Millisecond) + fields["standard_deviation_ms"] = float64(stats.StdDevRtt) / float64(time.Millisecond) + acc.AddFields("ping", fields, tags) } -func onFin(packetsSent int, resps []*ping.Response, err error, destination string) (map[string]string, map[string]interface{}) { - packetsRcvd := len(resps) +type durationSlice []time.Duration - tags := map[string]string{"url": destination} - fields := map[string]interface{}{ - "result_code": 0, - "packets_transmitted": packetsSent, - "packets_received": packetsRcvd, - } +func (p durationSlice) Len() int { return len(p) } +func (p durationSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - if packetsSent == 0 { - if err != nil { - fields["result_code"] = 2 - } - return tags, fields +// R7 from Hyndman and Fan (1996), which matches Excel +func percentile(values durationSlice, perc int) time.Duration { + if len(values) == 0 { + return 0 } - - if packetsRcvd == 0 { - if err != nil { - fields["result_code"] = 1 - } - fields["percent_packet_loss"] = float64(100) - return tags, fields + if perc < 0 { + perc = 0 } - - fields["percent_packet_loss"] = float64(packetsSent-packetsRcvd) / float64(packetsSent) * 100 - ttl := resps[0].TTL - - var min, max, avg, total time.Duration - min = resps[0].RTT - max = resps[0].RTT - - for _, res := range resps { - if res.RTT < min { - min = res.RTT - } - if res.RTT > max { - max = res.RTT - } - total += res.RTT + if perc > 100 { + perc = 100 } + var percFloat = float64(perc) / 100.0 - avg = total / time.Duration(packetsRcvd) - var sumsquares time.Duration - for _, res := 
range resps { - sumsquares += (res.RTT - avg) * (res.RTT - avg) - } - stdDev := time.Duration(math.Sqrt(float64(sumsquares / time.Duration(packetsRcvd)))) + var count = len(values) + var rank = percFloat * float64(count-1) + var rankInteger = int(rank) + var rankFraction = rank - math.Floor(rank) - // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go - switch runtime.GOOS { - case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - fields["ttl"] = ttl + if rankInteger >= count-1 { + return values[count-1] } - fields["minimum_response_ms"] = float64(min.Nanoseconds()) / float64(time.Millisecond) - fields["average_response_ms"] = float64(avg.Nanoseconds()) / float64(time.Millisecond) - fields["maximum_response_ms"] = float64(max.Nanoseconds()) / float64(time.Millisecond) - fields["standard_deviation_ms"] = float64(stdDev.Nanoseconds()) / float64(time.Millisecond) - - return tags, fields + upper := values[rankInteger+1] + lower := values[rankInteger] + return lower + time.Duration(rankFraction*float64(upper-lower)) } // Init ensures the plugin is configured correctly. @@ -418,14 +321,55 @@ func (p *Ping) Init() error { return errors.New("bad number of packets to transmit") } + // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping + if p.PingInterval < 0.2 { + p.calcInterval = time.Duration(.2 * float64(time.Second)) + } else { + p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) + } + + // If no timeout is given default to 5 seconds, matching original implementation + if p.Timeout == 0 { + p.calcTimeout = time.Duration(5) * time.Second + } else { + p.calcTimeout = time.Duration(p.Timeout) * time.Second + } + + // Support either an IP address or interface name + if p.Interface != "" { + if addr := net.ParseIP(p.Interface); addr != nil { + p.sourceAddress = p.Interface + } else { + i, err := net.InterfaceByName(p.Interface) + if err != nil { + return fmt.Errorf("failed to get interface: %w", err) + } + addrs, err := i.Addrs() + if err != nil { + return fmt.Errorf("failed to get the address of interface: %w", err) + } + p.sourceAddress = addrs[0].(*net.IPNet).IP.String() + } + } + return nil } +func hostPinger(binary string, timeout float64, args ...string) (string, error) { + bin, err := exec.LookPath(binary) + if err != nil { + return "", err + } + c := exec.Command(bin, args...) 
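
The percentile helper above implements estimator R7: linear interpolation between the two bracketing order statistics. A quick standalone check against the RTT fixture used by `TestPingGatherNative` later in this diff — sorted [1, 2, 3, 4, 5] ms — reproduces the exact expected values, including the float truncation in p95:

```go
package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// percentile is the R7 estimator from the patch, with the 0..100
// clamping trimmed for brevity.
func percentile(values []time.Duration, perc int) time.Duration {
	if len(values) == 0 {
		return 0
	}
	sort.Slice(values, func(i, j int) bool { return values[i] < values[j] })
	rank := float64(perc) / 100 * float64(len(values)-1)
	lo := int(rank)
	frac := rank - math.Floor(rank)
	if lo >= len(values)-1 {
		return values[len(values)-1]
	}
	// Interpolate linearly between the two bracketing order statistics.
	return values[lo] + time.Duration(frac*float64(values[lo+1]-values[lo]))
}

func main() {
	rtts := []time.Duration{
		3 * time.Millisecond, 4 * time.Millisecond, 1 * time.Millisecond,
		5 * time.Millisecond, 2 * time.Millisecond,
	}
	for _, p := range []int{50, 95, 99} {
		v := percentile(rtts, p)
		fmt.Printf("p%d = %v ms\n", p, float64(v.Nanoseconds())/float64(time.Millisecond))
	}
	// Output:
	// p50 = 3 ms
	// p95 = 4.799999 ms  (0.95*4 = 3.7999... in float64, hence the truncation)
	// p99 = 4.96 ms
}
```

These are exactly the values asserted by the new test further down (`percentile95_ms == 4.799999`, `percentile99_ms == 4.96`).
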
+ out, err := internal.CombinedOutputTimeout(c, + time.Second*time.Duration(timeout+5)) + return string(out), err +} + func init() { inputs.Add("ping", func() telegraf.Input { - return &Ping{ + p := &Ping{ pingHost: hostPinger, - resolveHost: hostResolver, PingInterval: 1.0, Count: 1, Timeout: 1.0, @@ -433,6 +377,9 @@ func init() { Method: "exec", Binary: "ping", Arguments: []string{}, + Percentiles: []int{}, } + p.nativePingFunc = p.nativePing + return p }) } diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index a014a8237e8e7..f6bd751c2a4e3 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 0c8cfb0939daa..7faba097c4562 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -1,15 +1,18 @@ +//go:build !windows // +build !windows package ping import ( - "context" "errors" - "net" + "fmt" "reflect" "sort" "testing" + "time" + "github.com/go-ping/ping" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -227,7 +230,7 @@ func TestArguments(t *testing.T) { } } -func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockHostPinger(_ string, _ float64, _ ...string) (string, error) { return linuxPingOutput, nil } @@ -239,7 +242,7 @@ func TestPingGather(t *testing.T) { pingHost: mockHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "localhost"} fields := map[string]interface{}{ "packets_transmitted": 5, @@ -258,6 +261,22 @@ func TestPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } +func TestPingGatherIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode, retrieves systems ping utility") + } + + var acc testutil.Accumulator + p, ok := inputs.Inputs["ping"]().(*Ping) + p.Log = testutil.Logger{} + require.True(t, ok) + p.Urls = []string{"localhost", "influxdata.com"} + require.NoError(t, acc.GatherError(p.Gather)) + + require.Equal(t, 0, acc.Metrics[0].Fields["result_code"]) + require.Equal(t, 0, acc.Metrics[1].Fields["result_code"]) +} + var lossyPingOutput = ` PING www.google.com (216.58.218.164) 56(84) bytes of data. 64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms @@ -269,7 +288,7 @@ PING www.google.com (216.58.218.164) 56(84) bytes of data. 
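
Note on the build tags: from here on, every constrained file gains the Go 1.17 `//go:build` directive alongside the legacy `// +build` comment. The two lines must express the same constraint, and `gofmt` on Go 1.17+ keeps them synchronized. The boolean syntax maps mechanically; for example:

```go
// Equivalent constraint in both syntaxes: build on Linux or FreeBSD,
// but never under cgo. In the legacy form a comma is AND, a space is
// OR, and separate +build lines are ANDed together.
//go:build (linux || freebsd) && !cgo
// +build linux freebsd
// +build !cgo

package example
```
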
rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms ` -func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(_ string, _ float64, _ ...string) (string, error) { return lossyPingOutput, nil } @@ -281,7 +300,7 @@ func TestLossyPingGather(t *testing.T) { pingHost: mockLossyHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "www.google.com"} fields := map[string]interface{}{ "packets_transmitted": 5, @@ -305,7 +324,7 @@ Request timeout for icmp_seq 0 2 packets transmitted, 0 packets received, 100.0% packet loss ` -func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(_ string, _ float64, _ ...string) (string, error) { // This error will not trigger correct error paths return errorPingOutput, nil } @@ -319,7 +338,7 @@ func TestBadPingGather(t *testing.T) { pingHost: mockErrorHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "www.amazon.com"} fields := map[string]interface{}{ "packets_transmitted": 2, @@ -330,7 +349,7 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } -func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockFatalHostPinger(_ string, _ float64, _ ...string) (string, error) { return fatalPingOutput, errors.New("So very bad") } @@ -342,7 +361,9 @@ func TestFatalPingGather(t *testing.T) { pingHost: mockFatalHostPinger, } - acc.GatherError(p.Gather) + err := acc.GatherError(p.Gather) + require.Error(t, err) + require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, So very bad") assert.False(t, acc.HasMeasurement("packets_transmitted"), "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("packets_received"), @@ -376,7 +397,7 @@ func TestErrorWithHostNamePingGather(t *testing.T) { return param.out, errors.New("So very bad") }, } - acc.GatherError(p.Gather) + require.Error(t, acc.GatherError(p.Gather)) assert.True(t, len(acc.Errors) > 0) assert.Contains(t, acc.Errors, param.error) } @@ -392,52 +413,124 @@ func TestPingBinary(t *testing.T) { return "", nil }, } - acc.GatherError(p.Gather) -} - -func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { - ipaddr := net.IPAddr{} - ipaddr.IP = net.IPv4(127, 0, 0, 1) - return &ipaddr, nil + err := acc.GatherError(p.Gather) + require.Error(t, err) + require.EqualValues(t, err.Error(), "Fatal error processing ping output: www.google.com") } // Test that Gather function works using native ping func TestPingGatherNative(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to permission requirements.") + type test struct { + P *Ping } - var acc testutil.Accumulator - p := Ping{ + fakePingFunc := func(destination string) (*pingStats, error) { + s := &pingStats{ + Statistics: ping.Statistics{ + PacketsSent: 5, + PacketsRecv: 5, + Rtts: []time.Duration{ + 3 * time.Millisecond, + 4 * time.Millisecond, + 1 * time.Millisecond, + 5 * time.Millisecond, + 2 * time.Millisecond, + }, + }, + ttl: 1, + } + + return s, nil + } + + tests := []test{ + { + P: &Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + Percentiles: []int{50, 95, 99}, + nativePingFunc: fakePingFunc, + }, + }, + { + P: &Ping{ + Urls: []string{"localhost", 
"127.0.0.2"}, + Method: "native", + Count: 5, + PingInterval: 1, + Percentiles: []int{50, 95, 99}, + nativePingFunc: fakePingFunc, + }, + }, + } + + for _, tc := range tests { + var acc testutil.Accumulator + require.NoError(t, tc.P.Init()) + require.NoError(t, acc.GatherError(tc.P.Gather)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) + assert.True(t, acc.HasField("ping", "percentile50_ms")) + assert.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"]) + assert.True(t, acc.HasField("ping", "percentile95_ms")) + assert.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) + assert.True(t, acc.HasField("ping", "percentile99_ms")) + assert.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) + assert.True(t, acc.HasField("ping", "percent_packet_loss")) + assert.True(t, acc.HasField("ping", "minimum_response_ms")) + assert.True(t, acc.HasField("ping", "average_response_ms")) + assert.True(t, acc.HasField("ping", "maximum_response_ms")) + assert.True(t, acc.HasField("ping", "standard_deviation_ms")) + } +} + +func TestNoPacketsSent(t *testing.T) { + p := &Ping{ + Log: testutil.Logger{}, Urls: []string{"localhost", "127.0.0.2"}, Method: "native", Count: 5, - resolveHost: mockHostResolver, + Percentiles: []int{50, 95, 99}, + nativePingFunc: func(destination string) (*pingStats, error) { + s := &pingStats{ + Statistics: ping.Statistics{ + PacketsSent: 0, + PacketsRecv: 0, + }, + } + + return s, nil + }, } - assert.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) -} + var testAcc testutil.Accumulator + require.NoError(t, p.Init()) -func mockHostResolverError(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { - return nil, errors.New("myMock error") + p.pingToURLNative("localhost", &testAcc) + require.Zero(t, testAcc.Errors) + require.True(t, testAcc.HasField("ping", "result_code")) + require.Equal(t, 2, testAcc.Metrics[0].Fields["result_code"]) } // Test failed DNS resolutions func TestDNSLookupError(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to permission requirements.") + p := &Ping{ + Count: 1, + Log: testutil.Logger{}, + Urls: []string{"localhost"}, + Method: "native", + IPv6: false, + nativePingFunc: func(destination string) (*pingStats, error) { + return nil, fmt.Errorf("unknown") + }, } - var acc testutil.Accumulator - p := Ping{ - Urls: []string{"localhost"}, - Method: "native", - IPv6: false, - resolveHost: mockHostResolverError, - } + var testAcc testutil.Accumulator + require.NoError(t, p.Init()) - acc.GatherError(p.Gather) - assert.True(t, len(acc.Errors) > 0) + p.pingToURLNative("localhost", &testAcc) + require.Zero(t, testAcc.Errors) + require.True(t, testAcc.HasField("ping", "result_code")) + require.Equal(t, 1, testAcc.Metrics[0].Fields["result_code"]) } diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index f53d6f09a7373..1d3d933e7736b 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 
4618ec4db4942..6df8af3732a5f 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping @@ -113,6 +114,7 @@ func mockErrorHostPinger(binary string, timeout float64, args ...string) (string func TestBadPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.amazon.com"}, pingHost: mockErrorHostPinger, } @@ -133,6 +135,7 @@ func TestBadPingGather(t *testing.T) { func TestArguments(t *testing.T) { arguments := []string{"-c", "3"} p := Ping{ + Log: testutil.Logger{}, Count: 2, Timeout: 12.0, Arguments: arguments, @@ -169,6 +172,7 @@ func mockLossyHostPinger(binary string, timeout float64, args ...string) (string func TestLossyPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockLossyHostPinger, } @@ -229,6 +233,7 @@ func mockFatalHostPinger(binary string, timeout float64, args ...string) (string func TestFatalPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.amazon.com"}, pingHost: mockFatalHostPinger, } @@ -274,6 +279,7 @@ func mockUnreachableHostPinger(binary string, timeout float64, args ...string) ( func TestUnreachablePingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockUnreachableHostPinger, } @@ -321,6 +327,7 @@ func mockTTLExpiredPinger(binary string, timeout float64, args ...string) (strin func TestTTLExpiredPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockTTLExpiredPinger, } @@ -351,6 +358,7 @@ func TestTTLExpiredPingGather(t *testing.T) { func TestPingBinary(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index 8700362d0d63f..e2d271f51cba1 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -1,3 +1,8 @@ +//go:build !windows +// +build !windows + +// postfix doesn't aim for Windows + package postfix import ( @@ -57,7 +62,7 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { } var age int64 if !oldest.IsZero() { - age = int64(time.Now().Sub(oldest) / time.Second) + age = int64(time.Since(oldest) / time.Second) } else if length != 0 { // system doesn't support ctime age = -1 diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 5dbc91d13e23f..6ab6556a0cf07 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -1,7 +1,9 @@ +//go:build !windows +// +build !windows + package postfix import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +14,7 @@ import ( ) func TestGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) @@ -20,12 +22,12 @@ func TestGather(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755)) } - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), 
[]byte("defg"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) p := Postfix{ QueueDirectory: td, diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go new file mode 100644 index 0000000000000..3a2c5e5cb3619 --- /dev/null +++ b/plugins/inputs/postfix/postfix_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package postfix diff --git a/plugins/inputs/postfix/stat_ctim.go b/plugins/inputs/postfix/stat_ctim.go index 456df5ffd4dd2..06ddccb178fce 100644 --- a/plugins/inputs/postfix/stat_ctim.go +++ b/plugins/inputs/postfix/stat_ctim.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || netbsd || openbsd || solaris // +build dragonfly linux netbsd openbsd solaris package postfix diff --git a/plugins/inputs/postfix/stat_ctimespec.go b/plugins/inputs/postfix/stat_ctimespec.go index 40e0de6cc4a40..03f4e0a435f2c 100644 --- a/plugins/inputs/postfix/stat_ctimespec.go +++ b/plugins/inputs/postfix/stat_ctimespec.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd // +build darwin freebsd package postfix diff --git a/plugins/inputs/postfix/stat_none.go b/plugins/inputs/postfix/stat_none.go index d9b67b1663af8..c1ca6a41c662f 100644 --- a/plugins/inputs/postfix/stat_none.go +++ b/plugins/inputs/postfix/stat_none.go @@ -1,3 +1,4 @@ +//go:build !dragonfly && !linux && !netbsd && !openbsd && !solaris && !darwin && !freebsd // +build !dragonfly,!linux,!netbsd,!openbsd,!solaris,!darwin,!freebsd package postfix diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 0911b20ce7184..a90f571b7a7a0 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -6,10 +6,10 @@ import ( "strings" // register in driver. 
- _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -105,26 +105,26 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { query = `SELECT * FROM pg_stat_bgwriter` - bg_writer_row, err := p.DB.Query(query) + bgWriterRow, err := p.DB.Query(query) if err != nil { return err } - defer bg_writer_row.Close() + defer bgWriterRow.Close() // grab the column information from the result - if columns, err = bg_writer_row.Columns(); err != nil { + if columns, err = bgWriterRow.Columns(); err != nil { return err } - for bg_writer_row.Next() { - err = p.accRow(bg_writer_row, acc, columns) + for bgWriterRow.Next() { + err = p.accRow(bgWriterRow, acc, columns) if err != nil { return err } } - return bg_writer_row.Err() + return bgWriterRow.Err() } type scanner interface { @@ -156,13 +156,19 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str if columnMap["datname"] != nil { // extract the database name from the column map if dbNameStr, ok := (*columnMap["datname"]).(string); ok { - dbname.WriteString(dbNameStr) + if _, err := dbname.WriteString(dbNameStr); err != nil { + return err + } } else { // PG 12 adds tracking of global objects to pg_stat_database - dbname.WriteString("postgres_global") + if _, err := dbname.WriteString("postgres_global"); err != nil { + return err + } } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } var tagAddress string @@ -189,11 +195,9 @@ func init() { inputs.Add("postgresql", func() telegraf.Input { return &Postgresql{ Service: Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: false, }, } diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index b23321019f5f8..934d06414b7e6 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestPostgresqlGeneratesMetrics(t *testing.T) { +func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -94,7 +94,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } -func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { +func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -120,7 +120,7 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { assert.Equal(t, "postgres", point.Tags["db"]) } -func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { +func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -153,7 +153,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { assert.True(t, found) } -func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { +func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -176,7 +176,7 @@ func TestPostgresqlIgnoresUnwantedColumns(t 
*testing.T) { } } -func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { +func TestPostgresqlDatabaseWhitelistTestIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -216,7 +216,7 @@ func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { assert.False(t, foundTemplate1) } -func TestPostgresqlDatabaseBlacklistTest(t *testing.T) { +func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 96a9a63175658..e0793d4d2dbd6 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -3,17 +3,18 @@ package postgresql import ( "database/sql" "fmt" - "github.com/jackc/pgx" - "github.com/jackc/pgx/pgtype" - "github.com/jackc/pgx/stdlib" "net" "net/url" "regexp" "sort" "strings" + "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) // pulled from lib/pq @@ -88,14 +89,16 @@ func parseURL(uri string) (string, error) { // packages. type Service struct { Address string - Outputaddress string + OutputAddress string MaxIdle int MaxOpen int - MaxLifetime internal.Duration + MaxLifetime config.Duration DB *sql.DB IsPgBouncer bool } +var socketRegexp = regexp.MustCompile(`/\.s\.PGSQL\.\d+$`) + // Start starts the ServiceInput's service, whatever that may be func (p *Service) Start(telegraf.Accumulator) (err error) { const localhost = "host=localhost sslmode=disable" @@ -104,53 +107,38 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { p.Address = localhost } - connectionString := p.Address + connConfig, err := pgx.ParseConfig(p.Address) + if err != nil { + return err + } + + // Remove the socket name from the path + connConfig.Host = socketRegexp.ReplaceAllLiteralString(connConfig.Host, "") // Specific support to make it work with PgBouncer too // See https://github.com/influxdata/telegraf/issues/3253#issuecomment-357505343 if p.IsPgBouncer { - d := &stdlib.DriverConfig{ - ConnConfig: pgx.ConnConfig{ - PreferSimpleProtocol: true, - RuntimeParams: map[string]string{ - "client_encoding": "UTF8", - }, - CustomConnInfo: func(c *pgx.Conn) (*pgtype.ConnInfo, error) { - info := c.ConnInfo.DeepCopy() - info.RegisterDataType(pgtype.DataType{ - Value: &pgtype.OIDValue{}, - Name: "int8OID", - OID: pgtype.Int8OID, - }) - // Newer versions of pgbouncer need this defined. 
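
The pgx v3 `stdlib.DriverConfig`/`RegisterDriverConfig` workaround being removed here (with its custom OID registrations for PgBouncer) has no v4 equivalent: the replacement just below parses the DSN with `pgx.ParseConfig`, flips `PreferSimpleProtocol` so no server-side prepared statements hit PgBouncer's transaction pooler, and registers the config with `stdlib.RegisterConnConfig`. A minimal sketch of that v4 pattern — the `open` helper and its defaults are illustrative, not plugin code:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/stdlib"
)

// open shows the pgx v4 connection pattern used by the new Start():
// parse the DSN into a ConnConfig, adjust it, then register it and let
// database/sql dial through the returned connection string.
func open(address string, pgBouncer bool) (*sql.DB, error) {
	cfg, err := pgx.ParseConfig(address)
	if err != nil {
		return nil, err
	}
	if pgBouncer {
		// Simple protocol avoids prepared statements, which PgBouncer's
		// transaction pooling cannot track across backend connections.
		cfg.PreferSimpleProtocol = true
	}
	return sql.Open("pgx", stdlib.RegisterConnConfig(cfg))
}

func main() {
	db, err := open("host=localhost sslmode=disable", true)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```
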
See the discussion here: - // https://github.com/jackc/pgx/issues/649 - info.RegisterDataType(pgtype.DataType{ - Value: &pgtype.OIDValue{}, - Name: "numericOID", - OID: pgtype.NumericOID, - }) - - return info, nil - }, - }, - } - stdlib.RegisterDriverConfig(d) - connectionString = d.ConnectionString(p.Address) + // Remove DriveConfig and revert it by the ParseConfig method + // See https://github.com/influxdata/telegraf/issues/9134 + connConfig.PreferSimpleProtocol = true } + connectionString := stdlib.RegisterConnConfig(connConfig) if p.DB, err = sql.Open("pgx", connectionString); err != nil { return err } p.DB.SetMaxOpenConns(p.MaxOpen) p.DB.SetMaxIdleConns(p.MaxIdle) - p.DB.SetConnMaxLifetime(p.MaxLifetime.Duration) + p.DB.SetConnMaxLifetime(time.Duration(p.MaxLifetime)) return nil } // Stop stops the services and closes any necessary channels and connections func (p *Service) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive p.DB.Close() } @@ -162,8 +150,8 @@ func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) { canonicalizedAddress string ) - if p.Outputaddress != "" { - return p.Outputaddress, nil + if p.OutputAddress != "" { + return p.OutputAddress, nil } if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index abbdd07f43d1b..70464140aedf4 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -52,12 +52,17 @@ The example below has two queries are specified, with the following parameters: # defined tags. The values in these columns must be of a string-type, # a number-type or a blob-type. # + # The timestamp field is used to override the data points timestamp value. By + # default, all rows inserted with current time. By setting a timestamp column, + # the row will be inserted with that column's value. + # # Structure : # [[inputs.postgresql_extensible.query]] # sqlquery string # version string # withdbname boolean # tagvalue string (coma separated) + # timestamp string [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database where datname" version=901 diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index f91feaf407d49..176827a4b1dc7 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,14 +3,15 @@ package postgresql_extensible import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strings" + "time" - _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/v4/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" ) @@ -19,6 +20,7 @@ type Postgresql struct { postgresql.Service Databases []string AdditionalTags []string + Timestamp string Query query Debug bool @@ -32,6 +34,7 @@ type query []struct { Withdbname bool Tagvalue string Measurement string + Timestamp string } var ignoredColumns = map[string]bool{"stats_reset": true} @@ -80,7 +83,16 @@ var sampleConfig = ` ## output measurement name ("postgresql"). 
## ## The script option can be used to specify the .sql file path. - ## If script and sqlquery options specified at same time, sqlquery will be used + ## If script and sqlquery options specified at same time, sqlquery will be used + ## + ## the tagvalue field is used to define custom tags (separated by comas). + ## the query is expected to return columns which match the names of the + ## defined tags. The values in these columns must be of a string-type, + ## a number-type or a blob-type. + ## + ## The timestamp field is used to override the data points timestamp value. By + ## default, all rows inserted with current time. By setting a timestamp column, + ## the row will be inserted with that column's value. ## ## Structure : ## [[inputs.postgresql_extensible.query]] @@ -89,6 +101,7 @@ var sampleConfig = ` ## withdbname boolean ## tagvalue string (comma separated) ## measurement string + ## timestamp string [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database" version=901 @@ -134,7 +147,7 @@ func ReadQueryFromFile(filePath string) (string, error) { } defer file.Close() - query, err := ioutil.ReadAll(file) + query, err := io.ReadAll(file) if err != nil { return "", err } @@ -143,48 +156,49 @@ func ReadQueryFromFile(filePath string) (string, error) { func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( - err error - sql_query string - query_addon string - db_version int - query string - tag_value string - meas_name string - columns []string + err error + sqlQuery string + queryAddon string + dbVersion int + query string + tagValue string + measName string + timestamp string + columns []string ) // Retrieving the database version query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` - if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { - db_version = 0 + if err = p.DB.QueryRow(query).Scan(&dbVersion); err != nil { + dbVersion = 0 } // We loop in order to process each query // Query is not run if Database version does not match the query version. 
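
The new `timestamp` query option documented above lets one result column supply each point's time instead of `time.Now()`; the wiring lands in `accRow` just below. A condensed, self-contained sketch of that column handling — `rowTime` is an illustrative helper, not plugin code:

```go
package main

import (
	"fmt"
	"time"
)

// rowTime mirrors the override added to accRow: if the configured
// timestamp column is present and scans as time.Time, it replaces the
// default time.Now(); otherwise the current time is kept and the
// column is emitted like any other field.
func rowTime(columnMap map[string]*interface{}, tsCol string) time.Time {
	ts := time.Now()
	if val, ok := columnMap[tsCol]; ok && tsCol != "" {
		if v, ok := (*val).(time.Time); ok {
			ts = v
		}
	}
	return ts
}

func main() {
	// Matches the new test case: SELECT timestamp'1980-07-23' AS ts, ...
	var v interface{} = time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC)
	cols := map[string]*interface{}{"ts": &v}
	fmt.Println(rowTime(cols, "ts")) // 1980-07-23 00:00:00 +0000 UTC
}
```
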
for i := range p.Query { - sql_query = p.Query[i].Sqlquery - tag_value = p.Query[i].Tagvalue + sqlQuery = p.Query[i].Sqlquery + tagValue = p.Query[i].Tagvalue + timestamp = p.Query[i].Timestamp if p.Query[i].Measurement != "" { - meas_name = p.Query[i].Measurement + measName = p.Query[i].Measurement } else { - meas_name = "postgresql" + measName = "postgresql" } if p.Query[i].Withdbname { if len(p.Databases) != 0 { - query_addon = fmt.Sprintf(` IN ('%s')`, - strings.Join(p.Databases, "','")) + queryAddon = fmt.Sprintf(` IN ('%s')`, strings.Join(p.Databases, "','")) } else { - query_addon = " is not null" + queryAddon = " is not null" } } else { - query_addon = "" + queryAddon = "" } - sql_query += query_addon + sqlQuery += queryAddon - if p.Query[i].Version <= db_version { - rows, err := p.DB.Query(sql_query) + if p.Query[i].Version <= dbVersion { + rows, err := p.DB.Query(sqlQuery) if err != nil { p.Log.Error(err.Error()) continue @@ -199,15 +213,17 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { } p.AdditionalTags = nil - if tag_value != "" { - tag_list := strings.Split(tag_value, ",") - for t := range tag_list { - p.AdditionalTags = append(p.AdditionalTags, tag_list[t]) + if tagValue != "" { + tagList := strings.Split(tagValue, ",") + for t := range tagList { + p.AdditionalTags = append(p.AdditionalTags, tagList[t]) } } + p.Timestamp = timestamp + for rows.Next() { - err = p.accRow(meas_name, rows, acc, columns) + err = p.accRow(measName, rows, acc, columns) if err != nil { p.Log.Error(err.Error()) break @@ -222,12 +238,13 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumulator, columns []string) error { +func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulator, columns []string) error { var ( err error columnVars []interface{} dbname bytes.Buffer tagAddress string + timestamp time.Time ) // this is where we'll store the column name with its *interface{} @@ -251,12 +268,18 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula // extract the database name from the column map switch datname := (*c).(type) { case string: - dbname.WriteString(datname) + if _, err := dbname.WriteString(datname); err != nil { + return err + } default: - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } if tagAddress, err = p.SanitizedAddress(); err != nil { @@ -269,6 +292,9 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula "db": dbname.String(), } + // set default timestamp to Now + timestamp = time.Now() + fields := make(map[string]interface{}) COLUMN: for col, val := range columnMap { @@ -278,6 +304,13 @@ COLUMN: continue } + if col == p.Timestamp { + if v, ok := (*val).(time.Time); ok { + timestamp = v + } + continue + } + for _, tag := range p.AdditionalTags { if col != tag { continue @@ -301,7 +334,7 @@ COLUMN: fields[col] = *val } } - acc.AddFields(meas_name, fields, tags) + acc.AddFields(measName, fields, tags, timestamp) return nil } @@ -309,11 +342,9 @@ func init() { inputs.Add("postgresql_extensible", func() telegraf.Input { return &Postgresql{ Service: postgresql.Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: 
false, }, } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index bca009f167cf7..399c236bffcea 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "testing" + "time" "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" @@ -25,13 +26,13 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator { Query: q, } var acc testutil.Accumulator - p.Start(&acc) - p.Init() + require.NoError(t, p.Init()) + require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) return &acc } -func TestPostgresqlGeneratesMetrics(t *testing.T) { +func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -98,7 +99,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } -func TestPostgresqlQueryOutputTests(t *testing.T) { +func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { const measurement = "postgresql" if testing.Short() { @@ -126,6 +127,13 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { assert.True(t, found) assert.Equal(t, true, v) }, + "SELECT timestamp'1980-07-23' as ts, true AS myvalue": func(acc *testutil.Accumulator) { + expectedTime := time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC) + v, found := acc.BoolField(measurement, "myvalue") + assert.True(t, found) + assert.Equal(t, true, v) + assert.True(t, acc.HasTimestamp(measurement, expectedTime)) + }, } for q, assertions := range examples { @@ -134,12 +142,13 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { Version: 901, Withdbname: false, Tagvalue: "", + Timestamp: "ts", }}) assertions(acc) } } -func TestPostgresqlFieldOutput(t *testing.T) { +func TestPostgresqlFieldOutputIntegration(t *testing.T) { const measurement = "postgresql" if testing.Short() { t.Skip("Skipping integration test in short mode") @@ -222,13 +231,13 @@ func TestPostgresqlSqlScript(t *testing.T) { Query: q, } var acc testutil.Accumulator - p.Start(&acc) - p.Init() + require.NoError(t, p.Init()) + require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) } -func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { +func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -280,15 +289,15 @@ type fakeRow struct { func (f fakeRow) Scan(dest ...interface{}) error { if len(f.fields) != len(dest) { - return errors.New("Nada matchy buddy") + return errors.New("nada matchy buddy") } for i, d := range dest { - switch d.(type) { - case (*interface{}): - *d.(*interface{}) = f.fields[i] + switch d := d.(type) { + case *interface{}: + *d = f.fields[i] default: - return fmt.Errorf("Bad type %T", d) + return fmt.Errorf("bad type %T", d) } } return nil diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 3c661990cee4c..5421c926a7745 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -56,14 +56,16 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error defer conn.Close() - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := 
conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) // Send command if _, err := fmt.Fprint(conn, "show * \n"); err != nil { - return nil + return err } if err := rw.Flush(); err != nil { return err diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index fe64be5db62eb..bf7d3845f7dc9 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -3,11 +3,14 @@ package powerdns import ( "fmt" "net" + "os" + "path/filepath" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -48,7 +51,6 @@ var intOverflowMetrics = "corrupt-packets=18446744073709550195,deferred-cache-in "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," func (s statServer) serverSocket(l net.Listener) { - for { conn, err := l.Accept() if err != nil { @@ -61,7 +63,11 @@ func (s statServer) serverSocket(l net.Listener) { data := buf[:n] if string(data) == "show * \n" { + // Ignore the returned error as we need to close the socket anyway + //nolint:errcheck,revive c.Write([]byte(metrics)) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() } }(conn) @@ -71,7 +77,8 @@ func (s statServer) serverSocket(l net.Listener) { func TestPowerdnsGeneratesMetrics(t *testing.T) { // We create a fake server to return test data randomNumber := int64(5239846799706671610) - socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) + sockname := filepath.Join(os.TempDir(), fmt.Sprintf("pdns%d.controlsocket", randomNumber)) + socket, err := net.Listen("unix", sockname) if err != nil { t.Fatal("Cannot initialize server on port ") } @@ -82,11 +89,10 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { go s.serverSocket(socket) p := &Powerdns{ - UnixSockets: []string{fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)}, + UnixSockets: []string{sockname}, } var acc testutil.Accumulator - err = acc.GatherError(p.Gather) require.NoError(t, err) diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index d040d8355329d..190297f9f58a1 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -97,14 +97,16 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator } defer conn.Close() - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) // Send command if _, err := fmt.Fprint(rw, "get-all\n"); err != nil { - return nil + return err } if err := rw.Flush(); err != nil { return err @@ -130,9 +132,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator acc.AddFields("powerdns_recursor", fields, tags) - conn.Close() - - return nil + return conn.Close() } func parseResponse(metrics string) map[string]interface{} { diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index d0f5690cc31cb..e715fe4e2d165 100644 --- 
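
Both PowerDNS plugins now propagate errors from `SetDeadline` and from writing the control command instead of silently discarding them (the old code even returned `nil` on a failed write). A condensed sketch of the corrected control-socket exchange — the socket path and command string are illustrative:

```go
package main

import (
	"bufio"
	"fmt"
	"net"
	"time"
)

// queryControlSocket mirrors the corrected gatherServer flow: every
// setup error is returned to the caller rather than dropped.
func queryControlSocket(address, command string) (string, error) {
	conn, err := net.Dial("unix", address)
	if err != nil {
		return "", err
	}
	defer conn.Close()

	// Previously the SetDeadline error was ignored; now it is propagated.
	if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return "", err
	}
	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
	// Previously a failed write returned nil; now it returns the error.
	if _, err := fmt.Fprint(rw, command); err != nil {
		return "", err
	}
	if err := rw.Flush(); err != nil {
		return "", err
	}
	buf := make([]byte, 16384)
	n, err := rw.Read(buf)
	if err != nil {
		return "", err
	}
	return string(buf[:n]), nil
}

func main() {
	out, err := queryControlSocket("/var/run/pdns.controlsocket", "show * \n")
	if err != nil {
		fmt.Println("control socket error:", err)
		return
	}
	fmt.Println(out)
}
```
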
a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/require" ) -type statServer struct{} - var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + @@ -99,25 +97,26 @@ var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36 "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("Skipping test on darwin") + if runtime.GOOS == "darwin" || runtime.GOOS == "windows" { + t.Skip("Skipping on windows and darwin, as unixgram sockets are not supported") } // We create a fake server to return test data controlSocket := "/tmp/pdns5724354148158589552.controlsocket" addr, err := net.ResolveUnixAddr("unixgram", controlSocket) - if err != nil { - t.Fatal("Cannot parse unix socket") - } + require.NoError(t, err, "Cannot parse unix socket") socket, err := net.ListenUnixgram("unixgram", addr) - if err != nil { - t.Fatal("Cannot initialize server on port") - } + require.NoError(t, err, "Cannot initialize server on port") var wg sync.WaitGroup wg.Add(1) go func() { defer func() { + // Ignore the returned error as we need to remove the socket file anyway + //nolint:errcheck,revive socket.Close() + // Ignore the returned error as we want to remove the file and ignore + // no-such-file errors + //nolint:errcheck,revive os.Remove(controlSocket) wg.Done() }() @@ -126,13 +125,19 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { buf := make([]byte, 1024) n, remote, err := socket.ReadFromUnix(buf) if err != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive socket.Close() return } data := buf[:n] if string(data) == "get-all\n" { + // Ignore the returned error as we need to close the socket anyway + //nolint:errcheck,revive socket.WriteToUnix([]byte(metrics), remote) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive socket.Close() } @@ -145,13 +150,11 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { SocketDir: "/tmp", SocketMode: "0666", } - err = p.Init() - require.NoError(t, err) + require.NoError(t, p.Init()) var acc testutil.Accumulator - err = acc.GatherError(p.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(p.Gather)) wg.Wait() @@ -299,14 +302,10 @@ func TestPowerdnsRecursorParseMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) + if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { continue } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } @@ -424,14 +423,10 @@ func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) + if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { continue } - if value != 
test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } @@ -549,13 +544,9 @@ func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) + if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { continue } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 61092aad96998..070dce65fe2a0 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes @@ -5,7 +6,6 @@ package processes import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -129,7 +129,6 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { // get process states from /proc/(pid)/stat files func (p *Processes) gatherFromProc(fields map[string]interface{}) error { filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat") - if err != nil { return err } @@ -192,11 +191,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - _, err := os.Stat(filename) - if err != nil { - return nil, err - } - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if os.IsNotExist(err) { return nil, nil diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index 630ecd65e7666..144b80f3fc1ec 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes @@ -15,10 +16,11 @@ import ( ) func TestProcesses(t *testing.T) { + tester := tester{} processes := &Processes{ Log: testutil.Logger{}, execPS: testExecPS("STAT\n Ss \n S \n Z \n R \n S< \n SNs \n Ss+ \n \n \n"), - readProcFile: readProcFile, + readProcFile: tester.testProcFile, } var acc testutil.Accumulator @@ -188,7 +190,7 @@ func (t *tester) testProcFile2(_ string) ([]byte, error) { } func testExecPSError() ([]byte, error) { - return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("ERROR!") + return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("error") } const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go index 567373c7c7260..f798a1668c738 100644 --- a/plugins/inputs/processes/processes_windows.go +++ b/plugins/inputs/processes/processes_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package processes diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 3803215697ec7..f0b9858601ade 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -26,8 +26,9 @@ 
Processes can be selected for monitoring using one of several methods: # pattern = "nginx" ## user as argument for pgrep (ie, pgrep -u ) # user = "nginx" - ## Systemd unit name + ## Systemd unit name, supports globs when include_systemd_children is set to true # systemd_unit = "nginx.service" + # include_systemd_children = false ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" @@ -44,6 +45,9 @@ Processes can be selected for monitoring using one of several methods: ## When true add the full cmdline as a tag. # cmdline_tag = false + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + ## Add the PID as a tag instead of as a field. When collecting multiple ## processes with otherwise matching tags this setting should be enabled to ## ensure each process has a unique identity. @@ -64,15 +68,6 @@ Processes can be selected for monitoring using one of several methods: Preliminary support for Windows has been added, however you may prefer using the `win_perf_counters` input plugin as a more mature alternative. -When using the `pid_finder = "native"` in Windows, the pattern lookup method is -implemented as a WMI query. The pattern allows fuzzy matching using only -[WMI query patterns](https://msdn.microsoft.com/en-us/library/aa392263(v=vs.85).aspx): -```toml -[[inputs.procstat]] - pattern = "%influx%" - pid_finder = "native" -``` - ### Metrics: - procstat @@ -86,6 +81,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - user (when selected) - systemd_unit (when defined) - cgroup (when defined) + - cgroup_full (when cgroup or systemd_unit is used with glob) - win_service (when defined) - fields: - child_major_faults (int) diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 5f286dd64a63e..05cf4a72735f0 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -20,7 +20,7 @@ func NewNativeFinder() (PIDFinder, error) { } //Uid will return all pids for the given user -func (pg *NativeFinder) Uid(user string) ([]PID, error) { +func (pg *NativeFinder) UID(user string) ([]PID, error) { var dst []PID procs, err := process.Processes() if err != nil { @@ -43,7 +43,7 @@ func (pg *NativeFinder) Uid(user string) ([]PID, error) { //PidFile returns the pid from the pid file given. func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) @@ -54,7 +54,6 @@ func (pg *NativeFinder) PidFile(path string) ([]PID, error) { } pids = append(pids, PID(pid)) return pids, nil - } //FullPattern matches on the command line when the process was executed diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 9d7409ba1df8e..528b083ae628b 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index ef9c5ffb11523..0148fdedca933 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestGather_RealPattern(t *testing.T) { +func TestGather_RealPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -22,7 +22,7 @@ func TestGather_RealPattern(t *testing.T) { assert.Equal(t, len(pids) > 0, true) } -func TestGather_RealFullPattern(t *testing.T) { +func TestGather_RealFullPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -34,7 +34,7 @@ func TestGather_RealFullPattern(t *testing.T) { assert.Equal(t, len(pids) > 0, true) } -func TestGather_RealUser(t *testing.T) { +func TestGather_RealUserIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -42,7 +42,7 @@ func TestGather_RealUser(t *testing.T) { require.NoError(t, err) pg, err := NewNativeFinder() require.NoError(t, err) - pids, err := pg.Uid(user.Username) + pids, err := pg.UID(user.Username) require.NoError(t, err) fmt.Println(pids) assert.Equal(t, len(pids) > 0, true) diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 37f9dfc3f67a9..34c44e0b2fefb 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "os/exec" "strconv" "strings" @@ -25,7 +25,7 @@ func NewPgrep() (PIDFinder, error) { func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) @@ -43,7 +43,7 @@ func (pg *Pgrep) Pattern(pattern string) ([]PID, error) { return find(pg.path, args) } -func (pg *Pgrep) Uid(user string) ([]PID, error) { +func (pg *Pgrep) UID(user string) ([]PID, error) { args := []string{"-u", user} return find(pg.path, args) } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 042929f0864cf..a8d8f3f51bfbd 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -26,12 +26,13 @@ type Process interface { RlimitUsage(bool) ([]process.RlimitStat, error) Username() (string, error) CreateTime() (int64, error) + Ppid() (int32, error) } type PIDFinder interface { PidFile(path string) ([]PID, error) Pattern(pattern string) ([]PID, error) - Uid(user string) ([]PID, error) + UID(user string) ([]PID, error) FullPattern(path string) ([]PID, error) } @@ -67,11 +68,11 @@ func (p *Proc) Username() (string, error) { return p.Process.Username() } -func (p *Proc) Percent(interval time.Duration) (float64, error) { - cpu_perc, err := p.Process.Percent(time.Duration(0)) +func (p *Proc) Percent(_ time.Duration) (float64, error) { + cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true - return 0, fmt.Errorf("Must call Percent twice to compute percent cpu.") + return 0, fmt.Errorf("must call Percent twice to compute percent cpu") } - return cpu_perc, err + return cpuPerc, err } diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 1d6af5df42246..09b5cc7cfa325 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -3,10 +3,12 @@ package procstat import ( "bytes" "fmt" - "io/ioutil" + "os" "os/exec" "path/filepath" + "runtime" "strconv" + "strings" "time" "github.com/influxdata/telegraf" @@ -22,18 +24,22 @@ var ( type PID int32 type Procstat struct { - PidFinder string `toml:"pid_finder"` - PidFile string `toml:"pid_file"` - Exe string - Pattern string - Prefix string - CmdLineTag bool `toml:"cmdline_tag"` - ProcessName string - User string - SystemdUnit string - CGroup string `toml:"cgroup"` - PidTag bool - WinService string `toml:"win_service"` + PidFinder string `toml:"pid_finder"` + PidFile string `toml:"pid_file"` + Exe string + Pattern string + Prefix string + CmdLineTag bool `toml:"cmdline_tag"` + ProcessName string + User string + SystemdUnit string `toml:"systemd_unit"` + IncludeSystemdChildren bool `toml:"include_systemd_children"` + CGroup string `toml:"cgroup"` + PidTag bool + WinService string `toml:"win_service"` + Mode string + + solarisMode bool finder PIDFinder @@ -51,9 +57,10 @@ var sampleConfig = ` # pattern = "nginx" ## user as argument for pgrep (ie, pgrep -u ) # user = "nginx" - ## Systemd unit name + ## Systemd unit name, supports globs when include_systemd_children is set to true # systemd_unit = "nginx.service" - ## CGroup name or path + # include_systemd_children = false + ## CGroup name or path, supports globs # cgroup = "systemd/system.slice/nginx.service" ## Windows service name @@ -69,6 +76,9 @@ var sampleConfig = ` ## When true add the full cmdline as a tag. # cmdline_tag = false + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + ## Add the PID as a tag instead of as a field. When collecting multiple ## processes with otherwise matching tags this setting should be enabled to ## ensure each process has a unique identity. 
@@ -84,14 +94,20 @@ var sampleConfig = ` # pid_finder = "pgrep" ` -func (_ *Procstat) SampleConfig() string { +func (p *Procstat) SampleConfig() string { return sampleConfig } -func (_ *Procstat) Description() string { +func (p *Procstat) Description() string { return "Monitor process cpu and memory usage" } +type PidsTags struct { + PIDS []PID + Tags map[string]string + Err error +} + func (p *Procstat) Gather(acc telegraf.Accumulator) error { if p.createPIDFinder == nil { switch p.PidFinder { @@ -103,52 +119,68 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.PidFinder = "pgrep" p.createPIDFinder = defaultPIDFinder } - } if p.createProcess == nil { p.createProcess = defaultProcess } - pids, tags, err := p.findPids(acc) - if err != nil { - fields := map[string]interface{}{ - "pid_count": 0, - "running": 0, - "result_code": 1, + pidCount := 0 + now := time.Now() + newProcs := make(map[PID]Process, len(p.procs)) + pidTags := p.findPids() + for _, pidTag := range pidTags { + pids := pidTag.PIDS + tags := pidTag.Tags + err := pidTag.Err + pidCount += len(pids) + if err != nil { + fields := map[string]interface{}{ + "pid_count": 0, + "running": 0, + "result_code": 1, + } + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "lookup_error", + } + acc.AddFields("procstat_lookup", fields, tags, now) + return err } - tags := map[string]string{ - "pid_finder": p.PidFinder, - "result": "lookup_error", + + err = p.updateProcesses(pids, tags, p.procs, newProcs) + if err != nil { + acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) } - acc.AddFields("procstat_lookup", fields, tags) - return err } - procs, err := p.updateProcesses(pids, tags, p.procs) - if err != nil { - acc.AddError(fmt.Errorf("E! 
Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", - p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) + p.procs = newProcs + for _, proc := range p.procs { + p.addMetric(proc, acc, now) } - p.procs = procs - for _, proc := range p.procs { - p.addMetric(proc, acc) + tags := make(map[string]string) + for _, pidTag := range pidTags { + for key, value := range pidTag.Tags { + tags[key] = value + } } fields := map[string]interface{}{ - "pid_count": len(pids), - "running": len(procs), + "pid_count": pidCount, + "running": len(p.procs), "result_code": 0, } + tags["pid_finder"] = p.PidFinder tags["result"] = "success" - acc.AddFields("procstat_lookup", fields, tags) + acc.AddFields("procstat_lookup", fields, tags, now) return nil } // Add metrics a single Process -func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { +func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time) { var prefix string if p.Prefix != "" { prefix = p.Prefix + "_" @@ -180,9 +212,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { //If cmd_line tag is true and it is not already set add cmdline as a tag if p.CmdLineTag { if _, ok := proc.Tags()["cmdline"]; !ok { - Cmdline, err := proc.Cmdline() + cmdline, err := proc.Cmdline() if err == nil { - proc.Tags()["cmdline"] = Cmdline + proc.Tags()["cmdline"] = cmdline } } } @@ -224,23 +256,27 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns } - cpu_time, err := proc.Times() + cpuTime, err := proc.Times() if err == nil { - fields[prefix+"cpu_time_user"] = cpu_time.User - fields[prefix+"cpu_time_system"] = cpu_time.System - fields[prefix+"cpu_time_idle"] = cpu_time.Idle - fields[prefix+"cpu_time_nice"] = cpu_time.Nice - fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait - fields[prefix+"cpu_time_irq"] = cpu_time.Irq - fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq - fields[prefix+"cpu_time_steal"] = cpu_time.Steal - fields[prefix+"cpu_time_guest"] = cpu_time.Guest - fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice - } - - cpu_perc, err := proc.Percent(time.Duration(0)) + fields[prefix+"cpu_time_user"] = cpuTime.User + fields[prefix+"cpu_time_system"] = cpuTime.System + fields[prefix+"cpu_time_idle"] = cpuTime.Idle + fields[prefix+"cpu_time_nice"] = cpuTime.Nice + fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait + fields[prefix+"cpu_time_irq"] = cpuTime.Irq + fields[prefix+"cpu_time_soft_irq"] = cpuTime.Softirq + fields[prefix+"cpu_time_steal"] = cpuTime.Steal + fields[prefix+"cpu_time_guest"] = cpuTime.Guest + fields[prefix+"cpu_time_guest_nice"] = cpuTime.GuestNice + } + + cpuPerc, err := proc.Percent(time.Duration(0)) if err == nil { - fields[prefix+"cpu_usage"] = cpu_perc + if p.solarisMode { + fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU()) + } else { + fields[prefix+"cpu_usage"] = cpuPerc + } } mem, err := proc.MemoryInfo() @@ -253,9 +289,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"memory_locked"] = mem.Locked } - mem_perc, err := proc.MemoryPercent() + memPerc, err := proc.MemoryPercent() if err == nil { - fields[prefix+"memory_usage"] = mem_perc + fields[prefix+"memory_usage"] = memPerc } rlims, err := proc.RlimitUsage(true) @@ -297,13 +333,16 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { } } - acc.AddFields("procstat", fields, proc.Tags()) + ppid, err := proc.Ppid() + if err 
== nil { + fields[prefix+"ppid"] = ppid + } + + acc.AddFields("procstat", fields, proc.Tags(), t) } // Update monitored Processes -func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) (map[PID]Process, error) { - procs := make(map[PID]Process, len(prevInfo)) - +func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process, procs map[PID]Process) error { for _, pid := range pids { info, ok := prevInfo[pid] if ok { @@ -338,7 +377,7 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo } } } - return procs, nil + return nil } // Create and return PIDGatherer lazily @@ -354,16 +393,34 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) { } // Get matching PIDs and their initial tags -func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) { +func (p *Procstat) findPids() []PidsTags { + var pidTags []PidsTags + + if p.SystemdUnit != "" { + groups := p.systemdUnitPIDs() + return groups + } else if p.CGroup != "" { + groups := p.cgroupPIDs() + return groups + } else { + f, err := p.getPIDFinder() + if err != nil { + pidTags = append(pidTags, PidsTags{nil, nil, err}) + return pidTags + } + pids, tags, err := p.SimpleFindPids(f) + pidTags = append(pidTags, PidsTags{pids, tags, err}) + } + + return pidTags +} + +// Get matching PIDs and their initial tags +func (p *Procstat) SimpleFindPids(f PIDFinder) ([]PID, map[string]string, error) { var pids []PID tags := make(map[string]string) var err error - f, err := p.getPIDFinder() - if err != nil { - return nil, nil, err - } - if p.PidFile != "" { pids, err = f.PidFile(p.PidFile) tags = map[string]string{"pidfile": p.PidFile} @@ -374,19 +431,13 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, pids, err = f.FullPattern(p.Pattern) tags = map[string]string{"pattern": p.Pattern} } else if p.User != "" { - pids, err = f.Uid(p.User) + pids, err = f.UID(p.User) tags = map[string]string{"user": p.User} - } else if p.SystemdUnit != "" { - pids, err = p.systemdUnitPIDs() - tags = map[string]string{"systemd_unit": p.SystemdUnit} - } else if p.CGroup != "" { - pids, err = p.cgroupPIDs() - tags = map[string]string{"cgroup": p.CGroup} } else if p.WinService != "" { pids, err = p.winServicePIDs() tags = map[string]string{"win_service": p.WinService} } else { - err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") + err = fmt.Errorf("either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") } return pids, tags, err @@ -395,8 +446,23 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, // execCommand is so tests can mock out exec.Command usage. 
var execCommand = exec.Command -func (p *Procstat) systemdUnitPIDs() ([]PID, error) { +func (p *Procstat) systemdUnitPIDs() []PidsTags { + if p.IncludeSystemdChildren { + p.CGroup = fmt.Sprintf("systemd/system.slice/%s", p.SystemdUnit) + return p.cgroupPIDs() + } + + var pidTags []PidsTags + + pids, err := p.simpleSystemdUnitPIDs() + tags := map[string]string{"systemd_unit": p.SystemdUnit} + pidTags = append(pidTags, PidsTags{pids, tags, err}) + return pidTags +} + +func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { var pids []PID + cmd := execCommand("systemctl", "show", p.SystemdUnit) out, err := cmd.Output() if err != nil { @@ -419,18 +485,43 @@ func (p *Procstat) systemdUnitPIDs() ([]PID, error) { } pids = append(pids, PID(pid)) } + return pids, nil } -func (p *Procstat) cgroupPIDs() ([]PID, error) { - var pids []PID +func (p *Procstat) cgroupPIDs() []PidsTags { + var pidTags []PidsTags procsPath := p.CGroup if procsPath[0] != '/' { procsPath = "/sys/fs/cgroup/" + procsPath } - procsPath = filepath.Join(procsPath, "cgroup.procs") - out, err := ioutil.ReadFile(procsPath) + items, err := filepath.Glob(procsPath) + if err != nil { + pidTags = append(pidTags, PidsTags{nil, nil, fmt.Errorf("glob failed '%s'", err)}) + return pidTags + } + for _, item := range items { + pids, err := p.singleCgroupPIDs(item) + tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item} + pidTags = append(pidTags, PidsTags{pids, tags, err}) + } + + return pidTags +} + +func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { + var pids []PID + + ok, err := isDir(path) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("not a directory %s", path) + } + procsPath := filepath.Join(path, "cgroup.procs") + out, err := os.ReadFile(procsPath) if err != nil { return nil, err } @@ -448,6 +539,14 @@ func (p *Procstat) cgroupPIDs() ([]PID, error) { return pids, nil } +func isDir(path string) (bool, error) { + result, err := os.Stat(path) + if err != nil { + return false, err + } + return result.IsDir(), nil +} + func (p *Procstat) winServicePIDs() ([]PID, error) { var pids []PID @@ -461,6 +560,14 @@ func (p *Procstat) winServicePIDs() ([]PID, error) { return pids, nil } +func (p *Procstat) Init() error { + if strings.ToLower(p.Mode) == "solaris" { + p.solarisMode = true + } + + return nil +} + func init() { inputs.Add("procstat", func() telegraf.Input { return &Procstat{} diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index e1ee8ab921841..bc586fca4fa42 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -2,7 +2,6 @@ package procstat import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -27,17 +26,17 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { cmd.Stderr = os.Stderr return cmd } -func TestMockExecCommand(t *testing.T) { +func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { - if string(arg) == "--" { + if arg == "--" { cmd = []string{} continue } if cmd == nil { continue } - cmd = append(cmd, string(arg)) + cmd = append(cmd, arg) } if cmd == nil { return @@ -45,6 +44,7 @@ func TestMockExecCommand(t *testing.T) { cmdline := strings.Join(cmd, " ") if cmdline == "systemctl show TestGather_systemdUnitPIDs" { + //nolint:errcheck,revive fmt.Printf(`PIDFile= GuessMainPID=yes MainPID=11408 @@ -54,6 +54,7 @@ ExecMainPID=11408 os.Exit(0) } + //nolint:errcheck,revive fmt.Printf("command not found\n") os.Exit(1) } @@ 
-63,16 +64,16 @@ type testPgrep struct { err error } -func pidFinder(pids []PID, err error) func() (PIDFinder, error) { +func pidFinder(pids []PID) func() (PIDFinder, error) { return func() (PIDFinder, error) { return &testPgrep{ pids: pids, - err: err, + err: nil, }, nil } } -func (pg *testPgrep) PidFile(path string) ([]PID, error) { +func (pg *testPgrep) PidFile(_ string) ([]PID, error) { return pg.pids, pg.err } @@ -80,15 +81,15 @@ func (p *testProc) Cmdline() (string, error) { return "test_proc", nil } -func (pg *testPgrep) Pattern(pattern string) ([]PID, error) { +func (pg *testPgrep) Pattern(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) Uid(user string) ([]PID, error) { +func (pg *testPgrep) UID(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) { +func (pg *testPgrep) FullPattern(_ string) ([]PID, error) { return pg.pids, pg.err } @@ -97,7 +98,7 @@ type testProc struct { tags map[string]string } -func newTestProc(pid PID) (Process, error) { +func newTestProc(_ PID) (Process, error) { proc := &testProc{ tags: make(map[string]string), } @@ -144,7 +145,7 @@ func (p *testProc) NumThreads() (int32, error) { return 0, nil } -func (p *testProc) Percent(interval time.Duration) (float64, error) { +func (p *testProc) Percent(_ time.Duration) (float64, error) { return 0, nil } @@ -160,19 +161,23 @@ func (p *testProc) Times() (*cpu.TimesStat, error) { return &cpu.TimesStat{}, nil } -func (p *testProc) RlimitUsage(gatherUsage bool) ([]process.RlimitStat, error) { +func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) { return []process.RlimitStat{}, nil } -var pid PID = PID(42) -var exe string = "foo" +func (p *testProc) Ppid() (int32, error) { + return 0, nil +} + +var pid = PID(42) +var exe = "foo" func TestGather_CreateProcessErrorOk(t *testing.T) { var acc testutil.Accumulator p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: func(PID) (Process, error) { return nil, fmt.Errorf("createProcess error") }, @@ -198,7 +203,7 @@ func TestGather_ProcessName(t *testing.T) { p := Procstat{ Exe: exe, ProcessName: "custom_name", - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -212,7 +217,7 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -225,7 +230,7 @@ func TestGather_NoPidTag(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -239,7 +244,7 @@ func TestGather_PidTag(t *testing.T) { p := Procstat{ Exe: exe, PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -253,7 +258,7 @@ func TestGather_Prefix(t *testing.T) { p := Procstat{ Exe: exe, Prefix: "custom_prefix", - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -265,7 +270,7 @@ func TestGather_Exe(t *testing.T) { p := Procstat{ Exe: exe, - 
createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -279,7 +284,7 @@ func TestGather_User(t *testing.T) { p := Procstat{ User: user, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -293,7 +298,7 @@ func TestGather_Pattern(t *testing.T) { p := Procstat{ Pattern: pattern, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -305,7 +310,7 @@ func TestGather_MissingPidMethod(t *testing.T) { var acc testutil.Accumulator p := Procstat{ - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.Error(t, acc.GatherError(p.Gather)) @@ -317,7 +322,7 @@ func TestGather_PidFile(t *testing.T) { p := Procstat{ PidFile: pidfile, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -332,7 +337,7 @@ func TestGather_PercentFirstPass(t *testing.T) { p := Procstat{ Pattern: "foo", PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: NewProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -348,7 +353,7 @@ func TestGather_PercentSecondPass(t *testing.T) { p := Procstat{ Pattern: "foo", PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: NewProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -360,14 +365,18 @@ func TestGather_PercentSecondPass(t *testing.T) { func TestGather_systemdUnitPIDs(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{}, nil), + createPIDFinder: pidFinder([]PID{}), SystemdUnit: "TestGather_systemdUnitPIDs", } - var acc testutil.Accumulator - pids, tags, err := p.findPids(&acc) - require.NoError(t, err) - assert.Equal(t, []PID{11408}, pids) - assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + pidsTags := p.findPids() + for _, pidsTag := range pidsTags { + pids := pidsTag.PIDS + tags := pidsTag.Tags + err := pidsTag.Err + require.NoError(t, err) + assert.Equal(t, []PID{11408}, pids) + assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + } } func TestGather_cgroupPIDs(t *testing.T) { @@ -375,26 +384,30 @@ func TestGather_cgroupPIDs(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("no cgroups in windows") } - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - err = ioutil.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) + err = os.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) require.NoError(t, err) p := Procstat{ - createPIDFinder: pidFinder([]PID{}, nil), + createPIDFinder: pidFinder([]PID{}), CGroup: td, } - var acc testutil.Accumulator - pids, tags, err := p.findPids(&acc) - require.NoError(t, err) - assert.Equal(t, []PID{1234, 5678}, pids) - assert.Equal(t, td, tags["cgroup"]) + pidsTags := p.findPids() + for _, pidsTag := range pidsTags { + pids := pidsTag.PIDS + tags := pidsTag.Tags + err := pidsTag.Err + require.NoError(t, err) + assert.Equal(t, []PID{1234, 5678}, pids) + assert.Equal(t, td, tags["cgroup"]) + } } func TestProcstatLookupMetric(t 
*testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{543}, nil), + createPIDFinder: pidFinder([]PID{543}), Exe: "-Gsys", } var acc testutil.Accumulator @@ -402,3 +415,20 @@ func TestProcstatLookupMetric(t *testing.T) { require.NoError(t, err) require.Equal(t, len(p.procs)+1, len(acc.Metrics)) } + +func TestGather_SameTimestamps(t *testing.T) { + var acc testutil.Accumulator + pidfile := "/path/to/pidfile" + + p := Procstat{ + PidFile: pidfile, + createPIDFinder: pidFinder([]PID{pid}), + createProcess: newTestProc, + } + require.NoError(t, acc.GatherError(p.Gather)) + + procstat, _ := acc.Get("procstat") + procstatLookup, _ := acc.Get("procstat_lookup") + + require.Equal(t, procstat.Time, procstatLookup.Time) +} diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go index 3d539d9f9918c..b7efcee17cdc1 100644 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat @@ -6,6 +7,6 @@ import ( "fmt" ) -func queryPidWithWinServiceName(winServiceName string) (uint32, error) { +func queryPidWithWinServiceName(_ string) (uint32, error) { return 0, fmt.Errorf("os not support win_service option") } diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go index 06dffc8472089..5d9c196e388c0 100644 --- a/plugins/inputs/procstat/win_service_windows.go +++ b/plugins/inputs/procstat/win_service_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package procstat diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index e9dd119cc12d4..955c6ab7d978b 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -10,29 +10,46 @@ in Prometheus format. [[inputs.prometheus]] ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] - + ## Metric version controls the mapping from Prometheus metrics into ## Telegraf metrics. When using the prometheus_client output, use the same ## value in both plugins to ensure metrics are round-tripped without ## modification. ## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 - + + ## URL tag name (tag containing scraped url; optional, default is "url") + # url_tag = "url" + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - + ## Kubernetes config file to create client from. # kube_config = "/path/to/kubernetes.config" - + ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to `https` & most likely set the tls config. + ## set this to 'https' & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + + ## Get the list of pods to scrape with either the scope of + ## - cluster: the kubernetes watch api (default, no need to specify) + ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. 
+ # pod_scrape_scope = "cluster" + + ## Only for node scrape scope: node IP of the node that telegraf is running on. + ## Either this config or the environment variable NODE_IP must be set. + # node_ip = "10.180.1.1" + + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. + ## Default is 60 seconds. + # pod_scrape_interval = 60 + ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" @@ -42,23 +59,37 @@ in Prometheus format. # eg. To scrape pods on a specific node # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + ## Scrape Services available in Consul Catalog + # [inputs.prometheus.consul] + # enabled = true + # agent = "http://localhost:8500" + # query_interval = "5m" + + # [[inputs.prometheus.consul.query]] + # name = "a service name" + # tag = "a service tag" + # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' + # [inputs.prometheus.consul.query.tags] + # host = "{{.Node}}" + ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" - + ## HTTP Basic Authentication username and password. ('bearer_token' and ## 'bearer_token_string' take priority) # username = "" # password = "" - + ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" - + ## Optional TLS Config # tls_ca = /path/to/cafile # tls_cert = /path/to/certfile # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification # insecure_skip_verify = false ``` @@ -88,6 +119,37 @@ Currently the following annotation are supported: Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping. +Using `pod_scrape_scope = "node"` enables more scalable scraping: Telegraf will scrape only the pods on the node it is running on, fetching the pod list locally from the node's kubelet. This requires running Telegraf on every node of the cluster. Note that either `node_ip` must be specified in the config or the environment variable `NODE_IP` must be set to the host IP. The latter can be done in the yaml of the pod running telegraf: +``` +env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + ``` + +When using the node scrape scope, `pod_scrape_interval` specifies how often (in seconds) the pod list for scraping should be updated. If not specified, the default is 60 seconds. + +#### Consul Service Discovery + +Enabling this option and configuring the consul `agent` url allows the plugin to query the +consul catalog for available services. Using `query_interval`, the plugin periodically +queries the consul catalog for services matching `name` and `tag` and refreshes the list of scraped urls. +It can use the information from the catalog to build the scraped url and additional tags from a template. + +Multiple consul queries can be configured, each for a different service. 
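Before the list of template fields below, here is a minimal sketch of how such a `url` template is expanded for a discovered service. The `catalogService` struct is a local stand-in for the handful of `api.CatalogService` fields the sample template touches, not the real Consul type:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// catalogService stands in for the subset of consul's api.CatalogService
// fields used by the sample url template above.
type catalogService struct {
	Node           string
	Address        string
	ServiceAddress string
	ServicePort    int
	ServiceMeta    map[string]string
}

func main() {
	// The same template as in the sample config above.
	urlTmpl := `http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}`
	t := template.Must(template.New("URL").Parse(urlTmpl))

	s := catalogService{
		Node:        "consul-node-1",
		Address:     "10.0.0.5",
		ServicePort: 9273,
		ServiceMeta: map[string]string{}, // no metrics_path: falls back to "metrics"
	}

	var buf bytes.Buffer
	if err := t.Execute(&buf, s); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // http://10.0.0.5:9273/metrics
}
```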
+The following example fields can be used in url or tag templates: +* Node +* Address +* NodeMeta +* ServicePort +* ServiceAddress +* ServiceTags +* ServiceMeta + +For a full list of available fields and their types, see the CatalogService struct in +https://github.com/hashicorp/consul/blob/master/api/catalog.go + #### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on @@ -96,20 +158,20 @@ Authorization header. ### Usage for Caddy HTTP server -If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin: +Steps to monitor Caddy with Telegraf's Prometheus input plugin: -* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus) -* Add the `prometheus` directive in your `CaddyFile` +* Download [Caddy](https://caddyserver.com/download) +* Download Prometheus and set up [monitoring Caddy with Prometheus metrics](https://caddyserver.com/docs/metrics#monitoring-caddy-with-prometheus-metrics) * Restart Caddy * Configure Telegraf to fetch metrics on it: ```toml [[inputs.prometheus]] # ## An array of urls to scrape metrics from. - urls = ["http://localhost:9180/metrics"] + urls = ["http://localhost:2019/metrics"] ``` -> This is the default URL where Caddy Prometheus plugin will send data. +> This is the default URL where Caddy will send data. > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). ### Metrics: diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go new file mode 100644 index 0000000000000..2f008a495c09b --- /dev/null +++ b/plugins/inputs/prometheus/consul.go @@ -0,0 +1,208 @@ +package prometheus + +import ( + "bytes" + "context" + "fmt" + "net/url" + "strings" + "text/template" + "time" + + "github.com/hashicorp/consul/api" + "github.com/influxdata/telegraf/config" +) + +type ConsulConfig struct { + // Address of the Consul agent. The address must contain a hostname or an IP address + // and optionally a port (format: "host:port"). + Enabled bool `toml:"enabled"` + Agent string `toml:"agent"` + QueryInterval config.Duration `toml:"query_interval"` + Queries []*ConsulQuery `toml:"query"` +} + +// One Consul service discovery query +type ConsulQuery struct { + // A name of the searched services (not ID) + ServiceName string `toml:"name"` + + // A tag of the searched services + ServiceTag string `toml:"tag"` + + // A DC of the searched services + ServiceDc string `toml:"dc"` + + // A template URL of the Prometheus gathering interface. The hostname part + // of the URL will be replaced by the discovered address and port. 
+ ServiceURL string `toml:"url"` + + // Extra tags to add to metrics found in Consul + ServiceExtraTags map[string]string `toml:"tags"` + + serviceURLTemplate *template.Template + serviceExtraTagsTemplate map[string]*template.Template + + // Store last error status and change log level depending on repeated occurrence + lastQueryFailed bool +} + +func (p *Prometheus) startConsul(ctx context.Context) error { + consulAPIConfig := api.DefaultConfig() + if p.ConsulConfig.Agent != "" { + consulAPIConfig.Address = p.ConsulConfig.Agent + } + + consul, err := api.NewClient(consulAPIConfig) + if err != nil { + return fmt.Errorf("cannot connect to the Consul agent: %v", err) + } + + // Parse the template for metrics URL, drop queries with template parse errors + i := 0 + for _, q := range p.ConsulConfig.Queries { + serviceURLTemplate, err := template.New("URL").Parse(q.ServiceURL) + if err != nil { + p.Log.Errorf("Could not parse the Consul query URL template (%s), skipping it. Error: %s", q.ServiceURL, err) + continue + } + q.serviceURLTemplate = serviceURLTemplate + + // Allow use of the join function in tags + templateFunctions := template.FuncMap{"join": strings.Join} + // Parse the tag value templates + q.serviceExtraTagsTemplate = make(map[string]*template.Template) + for tagName, tagTemplateString := range q.ServiceExtraTags { + tagTemplate, err := template.New(tagName).Funcs(templateFunctions).Parse(tagTemplateString) + if err != nil { + p.Log.Errorf("Could not parse the Consul query Extra Tag template (%s), skipping it. Error: %s", tagTemplateString, err) + continue + } + q.serviceExtraTagsTemplate[tagName] = tagTemplate + } + p.ConsulConfig.Queries[i] = q + i++ + } + // Prevent memory leak by erasing truncated values + for j := i; j < len(p.ConsulConfig.Queries); j++ { + p.ConsulConfig.Queries[j] = nil + } + p.ConsulConfig.Queries = p.ConsulConfig.Queries[:i] + + catalog := consul.Catalog() + + p.wg.Add(1) + go func() { + // Store last error status and change log level depending on repeated occurrence + var refreshFailed = false + defer p.wg.Done() + err := p.refreshConsulServices(catalog) + if err != nil { + refreshFailed = true + p.Log.Errorf("Unable to refresh Consul services: %v", err) + } + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Duration(p.ConsulConfig.QueryInterval)): + err := p.refreshConsulServices(catalog) + if err != nil { + message := fmt.Sprintf("Unable to refresh Consul services: %v", err) + if refreshFailed { + p.Log.Debug(message) + } else { + p.Log.Warn(message) + } + refreshFailed = true + } else if refreshFailed { + refreshFailed = false + p.Log.Info("Successfully refreshed Consul services after previous errors") + } + } + } + }() + + return nil +} + +func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { + consulServiceURLs := make(map[string]URLAndAddress) + + p.Log.Debugf("Refreshing Consul services") + + for _, q := range p.ConsulConfig.Queries { + queryOptions := api.QueryOptions{} + if q.ServiceDc != "" { + queryOptions.Datacenter = q.ServiceDc + } + + // Request services from Consul + consulServices, _, err := c.Service(q.ServiceName, q.ServiceTag, &queryOptions) + if err != nil { + return err + } + if len(consulServices) == 0 { + p.Log.Debugf("Queried Consul for Service (%s, %s) but did not find any instances", q.ServiceName, q.ServiceTag) + continue + } + p.Log.Debugf("Queried Consul for Service (%s, %s) and found %d instances", q.ServiceName, q.ServiceTag, len(consulServices)) + + for _, consulService := range consulServices 
{ + uaa, err := p.getConsulServiceURL(q, consulService) + if err != nil { + message := fmt.Sprintf("Unable to get scrape URLs from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, err) + if q.lastQueryFailed { + p.Log.Debug(message) + } else { + p.Log.Warn(message) + } + q.lastQueryFailed = true + break + } + if q.lastQueryFailed { + p.Log.Infof("Created scrape URLs from Consul for Service (%s, %s)", q.ServiceName, q.ServiceTag) + } + q.lastQueryFailed = false + p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.URL.String()) + consulServiceURLs[uaa.URL.String()] = *uaa + } + } + + p.lock.Lock() + p.consulServices = consulServiceURLs + p.lock.Unlock() + + return nil +} + +func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) (*URLAndAddress, error) { + var buffer bytes.Buffer + buffer.Reset() + err := q.serviceURLTemplate.Execute(&buffer, s) + if err != nil { + return nil, err + } + serviceURL, err := url.Parse(buffer.String()) + if err != nil { + return nil, err + } + + extraTags := make(map[string]string) + for tagName, tagTemplate := range q.serviceExtraTagsTemplate { + buffer.Reset() + err = tagTemplate.Execute(&buffer, s) + if err != nil { + return nil, err + } + extraTags[tagName] = buffer.String() + } + + p.Log.Debugf("Will scrape metrics from Consul Service %s", serviceURL.String()) + + return &URLAndAddress{ + URL: serviceURL, + OriginalURL: serviceURL, + Tags: extraTags, + }, nil +} diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 16f69cbd14228..9a4d6bd325c46 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -2,48 +2,67 @@ package prometheus import ( "context" + "crypto/tls" + "encoding/json" "fmt" - "io/ioutil" - "log" "net" + "net/http" "net/url" + "os" "os/user" "path/filepath" - "sync" "time" - "github.com/ericchiang/k8s" - corev1 "github.com/ericchiang/k8s/apis/core/v1" "github.com/ghodss/yaml" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) -type payload struct { - eventype string - pod *corev1.Pod +type podMetadata struct { + ResourceVersion string `json:"resourceVersion"` + SelfLink string `json:"selfLink"` } +type podResponse struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + Metadata podMetadata `json:"metadata"` + Items []*corev1.Pod `json:"items,string,omitempty"` +} + +const cAdvisorPodListDefaultInterval = 60 + // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. It does not support extensions or client auth providers. -func loadClient(kubeconfigPath string) (*k8s.Client, error) { - data, err := ioutil.ReadFile(kubeconfigPath) +func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { + data, err := os.ReadFile(kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) } // Unmarshal YAML into a Kubernetes config object. 
- var config k8s.Config + var config rest.Config if err := yaml.Unmarshal(data, &config); err != nil { return nil, err } - return k8s.NewClient(&config) + return kubernetes.NewForConfig(&config) } -func (p *Prometheus) start(ctx context.Context) error { - client, err := k8s.NewInClusterClient() +func (p *Prometheus) startK8s(ctx context.Context) error { + config, err := rest.InClusterConfig() + if err != nil { + return fmt.Errorf("failed to get InClusterConfig - %v", err) + } + client, err := kubernetes.NewForConfig(config) if err != nil { u, err := user.Current() if err != nil { - return fmt.Errorf("Failed to get current user - %v", err) + return fmt.Errorf("failed to get current user - %v", err) } configLocation := filepath.Join(u.HomeDir, ".kube/config") @@ -56,8 +75,6 @@ func (p *Prometheus) start(ctx context.Context) error { } } - p.wg = sync.WaitGroup{} - p.wg.Add(1) go func() { defer p.wg.Done() @@ -66,9 +83,16 @@ func (p *Prometheus) start(ctx context.Context) error { case <-ctx.Done(): return case <-time.After(time.Second): - err := p.watch(ctx, client) - if err != nil { - p.Log.Errorf("Unable to watch resources: %s", err.Error()) + if p.isNodeScrapeScope { + err = p.cAdvisor(ctx, config.BearerToken) + if err != nil { + p.Log.Errorf("Unable to monitor pods with node scrape scope: %s", err.Error()) + } + } else { + err = p.watchPod(ctx, client) + if err != nil { + p.Log.Errorf("Unable to watch resources: %s", err.Error()) + } } } } @@ -81,126 +105,242 @@ func (p *Prometheus) start(ctx context.Context) error { // (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape // pod, causing errors in the logs. This is only true if the pod going offline is not // directed to do so by K8s. -func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { - - selectors := podSelector(p) - - pod := &corev1.Pod{} - watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...) +func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) error { + watcher, err := client.CoreV1().Pods(p.PodNamespace).Watch(ctx, metav1.ListOptions{ + LabelSelector: p.KubernetesLabelSelector, + FieldSelector: p.KubernetesFieldSelector, + }) + defer watcher.Stop() if err != nil { return err } - defer watcher.Close() for { select { case <-ctx.Done(): return nil default: - pod = &corev1.Pod{} - // An error here means we need to reconnect the watcher. - eventType, err := watcher.Next(pod) - if err != nil { - return err - } + for event := range watcher.ResultChan() { + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return fmt.Errorf("Unexpected object when getting pods") + } - // If the pod is not "ready", there will be no ip associated with it. - if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" || - !podReady(pod.Status.GetContainerStatuses()) { - continue - } + // If the pod is not "ready", there will be no ip associated with it. + if pod.Annotations["prometheus.io/scrape"] != "true" || + !podReady(pod.Status.ContainerStatuses) { + continue + } - switch eventType { - case k8s.EventAdded: - registerPod(pod, p) - case k8s.EventModified: - // To avoid multiple actions for each event, unregister on the first event - // in the delete sequence, when the containers are still "ready". 
- if pod.Metadata.GetDeletionTimestamp() != nil { - unregisterPod(pod, p) - } else { + switch event.Type { + case watch.Added: registerPod(pod, p) + case watch.Modified: + // To avoid multiple actions for each event, unregister on the first event + // in the delete sequence, when the containers are still "ready". + if pod.GetDeletionTimestamp() != nil { + unregisterPod(pod, p) + } else { + registerPod(pod, p) + } } } } } } -func podReady(statuss []*corev1.ContainerStatus) bool { - if len(statuss) == 0 { - return false +func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error { + // The request will be the same each time + podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) + req, err := http.NewRequest("GET", podsURL, nil) + if err != nil { + return fmt.Errorf("error when creating request to %s to get pod list: %w", podsURL, err) } - for _, cs := range statuss { - if !cs.GetReady() { - return false + req.Header.Set("Authorization", "Bearer "+bearerToken) + req.Header.Add("Accept", "application/json") + + // Update right away so code is not waiting the length of the specified scrape interval initially + err = updateCadvisorPodList(p, req) + if err != nil { + return fmt.Errorf("error initially updating pod list: %w", err) + } + + scrapeInterval := cAdvisorPodListDefaultInterval + if p.PodScrapeInterval != 0 { + scrapeInterval = p.PodScrapeInterval + } + + for { + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Duration(scrapeInterval) * time.Second): + err := updateCadvisorPodList(p, req) + if err != nil { + return fmt.Errorf("error updating pod list: %w", err) + } } } - return true } -func podSelector(p *Prometheus) []k8s.Option { - options := []k8s.Option{} +func updateCadvisorPodList(p *Prometheus, req *http.Request) error { + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + httpClient := http.Client{} - if len(p.KubernetesLabelSelector) > 0 { - options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector)) + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("error when making request for pod list: %w", err) } - if len(p.KubernetesFieldSelector) > 0 { - options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector)) + // If err is nil, still check response code + if resp.StatusCode != 200 { + return fmt.Errorf("error when making request for pod list with status %s", resp.Status) } - return options + defer resp.Body.Close() + + cadvisorPodsResponse := podResponse{} + // Will have expected type errors for some parts of corev1.Pod struct for some unused fields + // Instead have nil checks for every used field in case of incorrect decoding + if err := json.NewDecoder(resp.Body).Decode(&cadvisorPodsResponse); err != nil { + return fmt.Errorf("decoding response failed: %v", err) + } + pods := cadvisorPodsResponse.Items + + // Updating pod list to be latest cadvisor response + p.lock.Lock() + p.kubernetesPods = make(map[string]URLAndAddress) + + // Register pod only if it has an annotation to scrape, if it is ready, + // and if namespace and selectors are specified and match + for _, pod := range pods { + if necessaryPodFieldsArePresent(pod) && + pod.Annotations["prometheus.io/scrape"] == "true" && + podReady(pod.Status.ContainerStatuses) && + podHasMatchingNamespace(pod, p) && + podHasMatchingLabelSelector(pod, p.podLabelSelector) && + podHasMatchingFieldSelector(pod, p.podFieldSelector) { + registerPod(pod, p) + } + } + p.lock.Unlock() 
+ + // No errors + return nil +} + +func necessaryPodFieldsArePresent(pod *corev1.Pod) bool { + return pod.Annotations != nil && + pod.Labels != nil && + pod.Status.ContainerStatuses != nil +} + +/* See the docs on kubernetes label selectors: + * https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + */ +func podHasMatchingLabelSelector(pod *corev1.Pod, labelSelector labels.Selector) bool { + if labelSelector == nil { + return true + } + + var labelsSet labels.Set = pod.Labels + return labelSelector.Matches(labelsSet) +} + +/* See ToSelectableFields() for list of fields that are selectable: + * https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go + * See docs on kubernetes field selectors: + * https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + */ +func podHasMatchingFieldSelector(pod *corev1.Pod, fieldSelector fields.Selector) bool { + if fieldSelector == nil { + return true + } + + fieldsSet := make(fields.Set) + fieldsSet["spec.nodeName"] = pod.Spec.NodeName + fieldsSet["spec.restartPolicy"] = string(pod.Spec.RestartPolicy) + fieldsSet["spec.schedulerName"] = pod.Spec.SchedulerName + fieldsSet["spec.serviceAccountName"] = pod.Spec.ServiceAccountName + fieldsSet["status.phase"] = string(pod.Status.Phase) + fieldsSet["status.podIP"] = pod.Status.PodIP + fieldsSet["status.nominatedNodeName"] = pod.Status.NominatedNodeName + + return fieldSelector.Matches(fieldsSet) +} + +/* + * If a namespace is specified and the pod doesn't have that namespace, return false + * Else return true + */ +func podHasMatchingNamespace(pod *corev1.Pod, p *Prometheus) bool { + return !(p.PodNamespace != "" && pod.Namespace != p.PodNamespace) +} + +func podReady(statuss []corev1.ContainerStatus) bool { + if len(statuss) == 0 { + return false + } + for _, cs := range statuss { + if !cs.Ready { + return false + } + } + return true } func registerPod(pod *corev1.Pod, p *Prometheus) { if p.kubernetesPods == nil { p.kubernetesPods = map[string]URLAndAddress{} } - targetURL := getScrapeURL(pod) - if targetURL == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("could not parse URL: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL) + p.Log.Debugf("will scrape metrics from %q", targetURL.String()) // add annotation as metrics tags - tags := pod.GetMetadata().GetAnnotations() + tags := pod.Annotations if tags == nil { tags = map[string]string{} } - tags["pod_name"] = pod.GetMetadata().GetName() - tags["namespace"] = pod.GetMetadata().GetNamespace() + tags["pod_name"] = pod.Name + tags["namespace"] = pod.Namespace // add labels as metrics tags - for k, v := range pod.GetMetadata().GetLabels() { + for k, v := range pod.Labels { tags[k] = v } - URL, err := url.Parse(*targetURL) - if err != nil { - log.Printf("E! 
[inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error()) - return + podURL := p.AddressToURL(targetURL, targetURL.Hostname()) + + // Locks earlier if using cAdvisor calls - makes a new list each time + // rather than updating and removing from the same list + if !p.isNodeScrapeScope { + p.lock.Lock() + defer p.lock.Unlock() } - podURL := p.AddressToURL(URL, URL.Hostname()) - p.lock.Lock() p.kubernetesPods[podURL.String()] = URLAndAddress{ URL: podURL, - Address: URL.Hostname(), - OriginalURL: URL, + Address: targetURL.Hostname(), + OriginalURL: targetURL, Tags: tags, } - p.lock.Unlock() } -func getScrapeURL(pod *corev1.Pod) *string { - ip := pod.Status.GetPodIP() +func getScrapeURL(pod *corev1.Pod) (*url.URL, error) { + ip := pod.Status.PodIP if ip == "" { // return as if scrape was disabled, we will be notified again once the pod // has an IP - return nil + return nil, nil } - scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"] - path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"] - port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"] + scheme := pod.Annotations["prometheus.io/scheme"] + pathAndQuery := pod.Annotations["prometheus.io/path"] + port := pod.Annotations["prometheus.io/port"] if scheme == "" { scheme = "http" @@ -208,34 +348,36 @@ func getScrapeURL(pod *corev1.Pod) *string { if port == "" { port = "9102" } - if path == "" { - path = "/metrics" + if pathAndQuery == "" { + pathAndQuery = "/metrics" } - u := &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(ip, port), - Path: path, + base, err := url.Parse(pathAndQuery) + if err != nil { + return nil, err } - x := u.String() + base.Scheme = scheme + base.Host = net.JoinHostPort(ip, port) - return &x + return base, nil } func unregisterPod(pod *corev1.Pod, p *Prometheus) { - url := getScrapeURL(pod) - if url == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("failed to parse url: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q", - pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) + p.Log.Debugf("registered a delete request for %q in namespace %q", pod.Name, pod.Namespace) p.lock.Lock() defer p.lock.Unlock() - if _, ok := p.kubernetesPods[*url]; ok { - delete(p.kubernetesPods, *url) - log.Printf("D! 
[inputs.prometheus] will stop scraping for %q", *url) + if _, ok := p.kubernetesPods[targetURL.String()]; ok { + delete(p.kubernetesPods, targetURL.String()) + p.Log.Debugf("will stop scraping for %q", targetURL.String()) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 8568ac946437e..2f67607cd3cf3 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -1,64 +1,87 @@ package prometheus import ( - "github.com/ericchiang/k8s" "testing" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" - v1 "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestScrapeURLNoAnnotations(t *testing.T) { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} - p.GetMetadata().Annotations = map[string]string{} - url := getScrapeURL(p) + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} + p.Annotations = map[string]string{} + url, err := getScrapeURL(p) + assert.NoError(t, err) assert.Nil(t, url) } func TestScrapeURLAnnotationsNoScrape(t *testing.T) { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} - p.Metadata.Name = str("myPod") - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "false"} - url := getScrapeURL(p) + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} + p.Name = "myPod" + p.Annotations = map[string]string{"prometheus.io/scrape": "false"} + url, err := getScrapeURL(p) + assert.NoError(t, err) assert.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) +} + 
+func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) } func TestAddPod(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) assert.Equal(t, 1, len(prom.kubernetesPods)) } @@ -67,9 +90,9 @@ func TestAddMultipleDuplicatePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - p.Metadata.Name = str("Pod2") + p.Name = "Pod2" registerPod(p, prom) assert.Equal(t, 1, len(prom.kubernetesPods)) } @@ -78,10 +101,10 @@ func TestAddMultiplePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - p.Metadata.Name = str("Pod2") - p.Status.PodIP = str("127.0.0.2") + p.Name = "Pod2" + p.Status.PodIP = "127.0.0.2" registerPod(p, prom) assert.Equal(t, 2, len(prom.kubernetesPods)) } @@ -90,66 +113,72 @@ func TestDeletePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) unregisterPod(p, prom) assert.Equal(t, 0, len(prom.kubernetesPods)) } -func TestPodSelector(t *testing.T) { - - cases := []struct { - expected []k8s.Option - labelselector string - fieldselector string - }{ - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"), - k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), - }, - labelselector: "key1=val1,key2=val2,key3", - fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", - }, - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1"), - k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), - }, - labelselector: "key1", - fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", - }, - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1"), - k8s.QueryParam("fieldSelector", "somefield"), - }, - labelselector: "key1", - fieldselector: "somefield", - }, - } - - for _, c := range cases { - prom := &Prometheus{ - Log: testutil.Logger{}, - KubernetesLabelSelector: c.labelselector, - KubernetesFieldSelector: c.fieldselector, - } - - output := podSelector(prom) - - assert.Equal(t, len(output), len(c.expected)) - } -} - -func pod() *v1.Pod { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}} - p.Status.PodIP = str("127.0.0.1") - p.Metadata.Name = str("myPod") - p.Metadata.Namespace = 
str("default") - return p +func TestPodHasMatchingNamespace(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}, PodNamespace: "default"} + + pod := pod() + pod.Name = "Pod1" + pod.Namespace = "default" + shouldMatch := podHasMatchingNamespace(pod, prom) + assert.Equal(t, true, shouldMatch) + + pod.Name = "Pod2" + pod.Namespace = "namespace" + shouldNotMatch := podHasMatchingNamespace(pod, prom) + assert.Equal(t, false, shouldNotMatch) } -func str(x string) *string { - return &x +func TestPodHasMatchingLabelSelector(t *testing.T) { + labelSelectorString := "label0==label0,label1=label1,label2!=label,label3 in (label1,label2, label3),label4 notin (label1, label2,label3),label5,!label6" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesLabelSelector: labelSelectorString} + + pod := pod() + pod.Labels = make(map[string]string) + pod.Labels["label0"] = "label0" + pod.Labels["label1"] = "label1" + pod.Labels["label2"] = "label2" + pod.Labels["label3"] = "label3" + pod.Labels["label4"] = "label4" + pod.Labels["label5"] = "label5" + + labelSelector, err := labels.Parse(prom.KubernetesLabelSelector) + assert.Equal(t, err, nil) + assert.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) +} + +func TestPodHasMatchingFieldSelector(t *testing.T) { + fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + pod := pod() + pod.Spec.RestartPolicy = "Always" + pod.Spec.NodeName = "node1000" + + fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector) + assert.Equal(t, err, nil) + assert.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector)) +} + +func TestInvalidFieldSelector(t *testing.T) { + fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName,spec.nodeName" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + pod := pod() + pod.Spec.RestartPolicy = "Always" + pod.Spec.NodeName = "node1000" + + _, err := fields.ParseSelector(prom.KubernetesFieldSelector) + assert.NotEqual(t, err, nil) +} + +func pod() *corev1.Pod { + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}, Status: corev1.PodStatus{}, Spec: corev1.PodSpec{}} + p.Status.PodIP = "127.0.0.1" + p.Name = "myPod" + p.Namespace = "default" + return p } diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 0726c87713b0a..7d3140dc7d627 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -1,8 +1,5 @@ package prometheus -// Parser inspired from -// https://github.com/prometheus/prom2json/blob/master/main.go - import ( "bufio" "bytes" @@ -15,168 +12,27 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" + "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" ) -// Parse returns a slice of Metrics from a text representation of a -// metrics -func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { - var metrics []telegraf.Metric - var parser expfmt.TextParser - // parse even if the buffer begins with a newline - buf = bytes.TrimPrefix(buf, []byte("\n")) - // Read raw data - buffer := bytes.NewBuffer(buf) - reader := bufio.NewReader(buffer) - - mediatype, params, err := 
mime.ParseMediaType(header.Get("Content-Type")) - // Prepare output - metricFamilies := make(map[string]*dto.MetricFamily) - - if err == nil && mediatype == "application/vnd.google.protobuf" && - params["encoding"] == "delimited" && - params["proto"] == "io.prometheus.client.MetricFamily" { - for { - mf := &dto.MetricFamily{} - if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { - if ierr == io.EOF { - break - } - return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr) - } - metricFamilies[mf.GetName()] = mf - } - } else { - metricFamilies, err = parser.TextToMetricFamilies(reader) - if err != nil { - return nil, fmt.Errorf("reading text format failed: %s", err) - } - } - - // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds - now := time.Now() - // read metrics - for metricName, mf := range metricFamilies { - for _, m := range mf.Metric { - // reading tags - tags := makeLabels(m) - - if mf.GetType() == dto.MetricType_SUMMARY { - // summary metric - telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now) - metrics = append(metrics, telegrafMetrics...) - } else if mf.GetType() == dto.MetricType_HISTOGRAM { - // histogram metric - telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now) - metrics = append(metrics, telegrafMetrics...) - } else { - // standard metric - // reading fields - fields := getNameAndValueV2(m, metricName) - // converting to telegraf metric - if len(fields) > 0 { - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType())) - if err == nil { - metrics = append(metrics, metric) - } - } - } - } - } - - return metrics, err -} - -// Get Quantiles for summary metric & Buckets for histogram -func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { - var metrics []telegraf.Metric - fields := make(map[string]interface{}) - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } - - for _, q := range m.GetSummary().Quantile { - newTags := tags - fields = make(map[string]interface{}) - - newTags["quantile"] = fmt.Sprint(q.GetQuantile()) - fields[metricName] = float64(q.GetValue()) - - quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, quantileMetric) - } - } - return metrics -} - -// Get Buckets from histogram metric -func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { - var metrics []telegraf.Metric - fields := make(map[string]interface{}) - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) - - met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) - if 
err == nil {
-		metrics = append(metrics, met)
-	}
-
-	for _, b := range m.GetHistogram().Bucket {
-		newTags := tags
-		fields = make(map[string]interface{})
-		newTags["le"] = fmt.Sprint(b.GetUpperBound())
-		fields[metricName+"_bucket"] = float64(b.GetCumulativeCount())
-
-		histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
-		if err == nil {
-			metrics = append(metrics, histogramMetric)
-		}
-	}
-	return metrics
-}
-
-// Parse returns a slice of Metrics from a text representation of a
-// metrics
 func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
-	var metrics []telegraf.Metric
 	var parser expfmt.TextParser
+	var metrics []telegraf.Metric
+	var err error
 	// parse even if the buffer begins with a newline
 	buf = bytes.TrimPrefix(buf, []byte("\n"))
 	// Read raw data
 	buffer := bytes.NewBuffer(buf)
 	reader := bufio.NewReader(buffer)
-	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
 	// Prepare output
 	metricFamilies := make(map[string]*dto.MetricFamily)
-	if err == nil && mediatype == "application/vnd.google.protobuf" &&
-		params["encoding"] == "delimited" &&
-		params["proto"] == "io.prometheus.client.MetricFamily" {
+	if isProtobuf(header) {
 		for {
 			mf := &dto.MetricFamily{}
 			if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
@@ -194,13 +50,13 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
 		}
 	}
 
-	// make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
 	now := time.Now()
 	// read metrics
 	for metricName, mf := range metricFamilies {
 		for _, m := range mf.Metric {
 			// reading tags
-			tags := makeLabels(m)
+			tags := common.MakeLabels(m, nil)
+
 			// reading fields
 			var fields map[string]interface{}
 			if mf.GetType() == dto.MetricType_SUMMARY {
@@ -213,7 +69,6 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
 				fields = makeBuckets(m)
 				fields["count"] = float64(m.GetHistogram().GetSampleCount())
 				fields["sum"] = float64(m.GetHistogram().GetSampleSum())
-
 			} else {
 				// standard metric
 				fields = getNameAndValue(m)
@@ -226,10 +81,8 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
 				} else {
 					t = now
 				}
-				metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType()))
-				if err == nil {
-					metrics = append(metrics, metric)
-				}
+				m := metric.New(metricName, tags, fields, t, common.ValueType(mf.GetType()))
+				metrics = append(metrics, m)
 			}
 		}
 	}
@@ -237,19 +90,15 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
 	return metrics, err
 }
 
-func valueType(mt dto.MetricType) telegraf.ValueType {
-	switch mt {
-	case dto.MetricType_COUNTER:
-		return telegraf.Counter
-	case dto.MetricType_GAUGE:
-		return telegraf.Gauge
-	case dto.MetricType_SUMMARY:
-		return telegraf.Summary
-	case dto.MetricType_HISTOGRAM:
-		return telegraf.Histogram
-	default:
-		return telegraf.Untyped
+func isProtobuf(header http.Header) bool {
+	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
+	if err != nil {
+		return false
 	}
+
+	return mediatype == "application/vnd.google.protobuf" &&
+		params["encoding"] == "delimited" &&
+		params["proto"] == "io.prometheus.client.MetricFamily"
 }
 
 // Get Quantiles from summary metric
@@ -272,15 +121,6 @@ func makeBuckets(m *dto.Metric) map[string]interface{} {
 	return fields
 }
 
-// Get labels from metric
-func makeLabels(m *dto.Metric) map[string]string {
-	result := map[string]string{}
-	for _, lp := range m.Label {
-		result[lp.GetName()] = lp.GetValue()
-	}
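The extracted isProtobuf helper above is the whole content-negotiation step: a scrape response is treated as delimited protobuf only when its Content-Type says so, and anything unparsable falls back to the text format. A minimal, self-contained sketch of that behavior; the sample header values below are illustrative, not taken from the patch:

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
)

// isProtobuf mirrors the helper added in parser.go: it reports whether the
// response advertises the delimited protobuf exposition format.
func isProtobuf(header http.Header) bool {
	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
	if err != nil {
		return false // unparsable header: fall back to the text format
	}
	return mediatype == "application/vnd.google.protobuf" &&
		params["encoding"] == "delimited" &&
		params["proto"] == "io.prometheus.client.MetricFamily"
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
	fmt.Println(isProtobuf(h)) // true

	h.Set("Content-Type", "text/plain; version=0.0.4")
	fmt.Println(isProtobuf(h)) // false
}
```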
- return result -} - // Get name and value from metric func getNameAndValue(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) @@ -299,22 +139,3 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { } return fields } - -// Get name and value from metric -func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} { - fields := make(map[string]interface{}) - if m.Gauge != nil { - if !math.IsNaN(m.GetGauge().GetValue()) { - fields[metricName] = float64(m.GetGauge().GetValue()) - } - } else if m.Counter != nil { - if !math.IsNaN(m.GetCounter().GetValue()) { - fields[metricName] = float64(m.GetCounter().GetValue()) - } - } else if m.Untyped != nil { - if !math.IsNaN(m.GetUntyped().GetValue()) { - fields[metricName] = float64(m.GetUntyped().GetValue()) - } - } - return fields -} diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index 7b2bfeca2e128..293e1968d2b5d 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -3,13 +3,10 @@ package prometheus import ( "net/http" "testing" - "time" "github.com/stretchr/testify/assert" ) -var exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. # TYPE cadvisor_version_info gauge cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 @@ -20,9 +17,6 @@ const validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token( get_token_fail_count 0 ` -const validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -` - const validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. # TYPE http_request_duration_microseconds summary http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 @@ -46,61 +40,6 @@ apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 ` -const validData = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. -# TYPE cadvisor_version_info gauge -cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 0.013534896000000001 -go_gc_duration_seconds{quantile="0.25"} 0.02469263 -go_gc_duration_seconds{quantile="0.5"} 0.033727822000000005 -go_gc_duration_seconds{quantile="0.75"} 0.03840335 -go_gc_duration_seconds{quantile="1"} 0.049956604 -go_gc_duration_seconds_sum 1970.341293002 -go_gc_duration_seconds_count 65952 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
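For the non-protobuf case, Parse hands the body to expfmt's text parser, which returns a map of metric families keyed by name. A small sketch of that fallback path, reusing the counter fixture kept in the tests above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Same shape as the validUniqueCounter fixture in parser_test.go.
	exposition := `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source
# TYPE get_token_fail_count counter
get_token_fail_count 0
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(exposition))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		// Prints: get_token_fail_count COUNTER 0
		fmt.Println(name, mf.GetType(), mf.Metric[0].GetCounter().GetValue())
	}
}
```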
-# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 -http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 -http_request_duration_microseconds_count{handler="prometheus"} 9 -# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -# TYPE get_token_fail_count counter -get_token_fail_count 0 -# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. -# TYPE apiserver_request_latencies histogram -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025 -apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 -apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 -` - -const prometheusMulti = ` -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - -const prometheusMultiSomeInvalid = ` -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu4 , usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - func TestParseValidPrometheus(t *testing.T) { // Gauge value metrics, err := Parse([]byte(validUniqueGauge), http.Header{}) @@ -163,5 +102,4 @@ func TestParseValidPrometheus(t *testing.T) { assert.Equal(t, map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) - } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 70d72e0b0a379..136e8ae0f6d9d 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -4,17 +4,23 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" + "os" + "strings" "sync" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" + parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" ) const acceptHeader = 
`application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`
@@ -35,6 +41,9 @@ type Prometheus struct {
 	// Field Selector/s for Kubernetes
 	KubernetesFieldSelector string `toml:"kubernetes_field_selector"`
 
+	// Consul SD configuration
+	ConsulConfig ConsulConfig `toml:"consul"`
+
 	// Bearer Token authorization file path
 	BearerToken       string `toml:"bearer_token"`
 	BearerTokenString string `toml:"bearer_token_string"`
@@ -43,7 +52,7 @@ type Prometheus struct {
 	Username string `toml:"username"`
 	Password string `toml:"password"`
 
-	ResponseTimeout internal.Duration `toml:"response_timeout"`
+	ResponseTimeout config.Duration `toml:"response_timeout"`
 
 	MetricVersion int `toml:"metric_version"`
 
@@ -53,15 +62,27 @@ type Prometheus struct {
 
 	Log telegraf.Logger
 
-	client *http.Client
+	client  *http.Client
+	headers map[string]string
 
 	// Should we scrape Kubernetes services for prometheus annotations
-	MonitorPods    bool   `toml:"monitor_kubernetes_pods"`
-	PodNamespace   string `toml:"monitor_kubernetes_pods_namespace"`
-	lock           sync.Mutex
-	kubernetesPods map[string]URLAndAddress
-	cancel         context.CancelFunc
-	wg             sync.WaitGroup
+	MonitorPods       bool   `toml:"monitor_kubernetes_pods"`
+	PodScrapeScope    string `toml:"pod_scrape_scope"`
+	NodeIP            string `toml:"node_ip"`
+	PodScrapeInterval int    `toml:"pod_scrape_interval"`
+	PodNamespace      string `toml:"monitor_kubernetes_pods_namespace"`
+	lock              sync.Mutex
+	kubernetesPods    map[string]URLAndAddress
+	cancel            context.CancelFunc
+	wg                sync.WaitGroup
+
+	// Only for monitor_kubernetes_pods=true and pod_scrape_scope="node"
+	podLabelSelector  labels.Selector
+	podFieldSelector  fields.Selector
+	isNodeScrapeScope bool
+
+	// List of consul services to scrape
+	consulServices map[string]URLAndAddress
 }
 
 var sampleConfig = `
@@ -73,12 +94,12 @@
   ## value in both plugins to ensure metrics are round-tripped without
   ## modification.
   ##
-  ## example: metric_version = 1; deprecated in 1.13
+  ## example: metric_version = 1;
   ##          metric_version = 2; recommended version
   # metric_version = 1
 
   ## URL tag name (tag containing scraped URL. optional, default is "url")
-  # url_tag = "scrapeUrl"
+  # url_tag = "url"
 
   ## An array of Kubernetes services to scrape metrics from.
   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
@@ -93,6 +114,16 @@
   ##   - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
   ##   - prometheus.io/port: If port is not 9102 use this annotation
   # monitor_kubernetes_pods = true
+  ## Get the list of pods to scrape with either the scope of
+  ##   - cluster: the kubernetes watch api (default, no need to specify)
+  ##   - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+  # pod_scrape_scope = "cluster"
+  ## Only for node scrape scope: node IP of the node that telegraf is running on.
+  ## Either this config or the environment variable NODE_IP must be set.
+  # node_ip = "10.180.1.1"
+  ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+  ## Default is 60 seconds.
+  # pod_scrape_interval = 60
 
   ## Restricts Kubernetes monitoring to a single namespace
   ##   ex: monitor_kubernetes_pods_namespace = "default"
   # monitor_kubernetes_pods_namespace = ""
@@ -102,6 +133,19 @@
   # eg. To scrape pods on a specific node
   # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
 
+  ## Scrape Services available in Consul Catalog
+  # [inputs.prometheus.consul]
+  #   enabled = true
+  #   agent = "http://localhost:8500"
+  #   query_interval = "5m"
+
+  #   [[inputs.prometheus.consul.query]]
+  #     name = "a service name"
+  #     tag = "a service tag"
+  #     url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+  #     [inputs.prometheus.consul.query.tags]
+  #       host = "{{.Node}}"
+
   ## Use bearer token for authorization. ('bearer_token' takes priority)
   # bearer_token = "/path/to/bearer/token"
   ## OR
@@ -132,8 +176,41 @@ func (p *Prometheus) Description() string {
 }
 
 func (p *Prometheus) Init() error {
-	if p.MetricVersion != 2 {
-		p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'")
+
+	// Config processing for node scrape scope for monitor_kubernetes_pods
+	p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node")
+	if p.isNodeScrapeScope {
+		// Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address
+		if p.NodeIP == "" || net.ParseIP(p.NodeIP) == nil {
+			p.Log.Infof("The config node_ip is empty or invalid. Using NODE_IP env var as default.")
+
+			// Check if set as env var and is valid IP address
+			envVarNodeIP := os.Getenv("NODE_IP")
+			if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil {
+				return errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid; " +
+					"cannot get pod list for monitor_kubernetes_pods using node scrape scope")
+			}
+
+			p.NodeIP = envVarNodeIP
+		}
+
+		// Parse label and field selectors - will be used to filter pods after cAdvisor call
+		var err error
+		p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector)
+		if err != nil {
+			return fmt.Errorf("error parsing the specified label selector(s): %s", err.Error())
+		}
+		p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector)
+		if err != nil {
+			return fmt.Errorf("error parsing the specified field selector(s): %s", err.Error())
+		}
+		isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector)
+		if !isValid {
+			return fmt.Errorf("the field selector %s is not supported for pods", invalidSelector)
+		}
+
+		p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.")
+		p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector)
 	}
 
 	return nil
@@ -168,7 +245,7 @@ type URLAndAddress struct {
 }
 
 func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
-	allURLs := make(map[string]URLAndAddress, 0)
+	allURLs := make(map[string]URLAndAddress)
 	for _, u := range p.URLs {
 		URL, err := url.Parse(u)
 		if err != nil {
@@ -180,6 +257,10 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
+	// add all services collected from consul
+	for k, v := range p.consulServices {
+		allURLs[k] = v
+	}
 	// loop through all pods scraped via the prometheus annotation on the pods
 	for k, v := range p.kubernetesPods {
 		allURLs[k] = v
@@ -217,6 +298,10 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
 			return err
 		}
 		p.client = client
+		p.headers = map[string]string{
+			"User-Agent": internal.ProductToken(),
+			"Accept":     acceptHeader,
+		}
 	}
 
 	var wg sync.WaitGroup
@@ -249,7 +334,7 @@ func (p *Prometheus) createHTTPClient() (*http.Client, error) {
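The node-scrape-scope validation in Init() above boils down to one fallback chain: use node_ip if it parses as an IP address, otherwise NODE_IP from the environment, otherwise fail. A condensed sketch of just that step; resolveNodeIP is an illustrative name, not part of the patch:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"os"
)

// resolveNodeIP mirrors the fallback in (*Prometheus).Init: prefer the
// configured value, then the NODE_IP environment variable, and reject
// anything that net.ParseIP cannot parse.
func resolveNodeIP(configured string) (string, error) {
	if configured != "" && net.ParseIP(configured) != nil {
		return configured, nil
	}
	if env := os.Getenv("NODE_IP"); env != "" && net.ParseIP(env) != nil {
		return env, nil
	}
	return "", errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid")
}

func main() {
	os.Setenv("NODE_IP", "10.180.1.1")
	ip, err := resolveNodeIP("10.240.0.0.0") // invalid config value, env wins
	fmt.Println(ip, err)                     // 10.180.1.1 <nil>
}
```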
TLSClientConfig: tlsCfg, DisableKeepAlives: true, }, - Timeout: p.ResponseTimeout.Duration, + Timeout: time.Duration(p.ResponseTimeout), } return client, nil @@ -282,7 +367,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return c, err }, }, - Timeout: p.ResponseTimeout.Duration, + Timeout: time.Duration(p.ResponseTimeout), } } else { if u.URL.Path == "" { @@ -294,10 +379,10 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } } - req.Header.Add("Accept", acceptHeader) + p.addHeaders(req) if p.BearerToken != "" { - token, err := ioutil.ReadFile(p.BearerToken) + token, err := os.ReadFile(p.BearerToken) if err != nil { return err } @@ -323,13 +408,14 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } if p.MetricVersion == 2 { - metrics, err = ParseV2(body, resp.Header) + parser := parser_v2.Parser{Header: resp.Header} + metrics, err = parser.Parse(body) } else { metrics, err = Parse(body, resp.Header) } @@ -370,28 +456,66 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return nil } -// Start will start the Kubernetes scraping if enabled in the configuration -func (p *Prometheus) Start(a telegraf.Accumulator) error { +func (p *Prometheus) addHeaders(req *http.Request) { + for header, value := range p.headers { + req.Header.Add(header, value) + } +} + +/* Check if the field selector specified is valid. + * See ToSelectableFields() for list of fields that are selectable: + * https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go + */ +func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { + supportedFieldsToSelect := map[string]bool{ + "spec.nodeName": true, + "spec.restartPolicy": true, + "spec.schedulerName": true, + "spec.serviceAccountName": true, + "status.phase": true, + "status.podIP": true, + "status.nominatedNodeName": true, + } + + for _, requirement := range fieldSelector.Requirements() { + if !supportedFieldsToSelect[requirement.Field] { + return false, requirement.Field + } + } + + return true, "" +} + +// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration +func (p *Prometheus) Start(_ telegraf.Accumulator) error { + var ctx context.Context + p.wg = sync.WaitGroup{} + ctx, p.cancel = context.WithCancel(context.Background()) + + if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { + if err := p.startConsul(ctx); err != nil { + return err + } + } if p.MonitorPods { - var ctx context.Context - ctx, p.cancel = context.WithCancel(context.Background()) - return p.start(ctx) + if err := p.startK8s(ctx); err != nil { + return err + } } return nil } func (p *Prometheus) Stop() { - if p.MonitorPods { - p.cancel() - } + p.cancel() p.wg.Wait() } func init() { inputs.Add("prometheus", func() telegraf.Input { return &Prometheus{ - ResponseTimeout: internal.Duration{Duration: time.Second * 3}, + ResponseTimeout: config.Duration(time.Second * 3), kubernetesPods: map[string]URLAndAddress{}, + consulServices: map[string]URLAndAddress{}, URLTag: "url", } }) diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index d33cba273c276..ea8ca0e9346ab 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ 
b/plugins/inputs/prometheus/prometheus_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" "testing" "time" @@ -13,6 +14,7 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/fields" ) const sampleTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. @@ -49,7 +51,8 @@ go_goroutines 15 1490802350000 func TestPrometheusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -74,7 +77,8 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -99,13 +103,14 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) } -func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { +func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -128,7 +133,8 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleSummaryTextFormat) + _, err := fmt.Fprintln(w, sampleSummaryTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -147,7 +153,6 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") - } func TestSummaryMayContainNaN(t *testing.T) { @@ -159,7 +164,8 @@ go_gc_duration_seconds_sum 42.0 go_gc_duration_seconds_count 42 ` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, data) + _, err := fmt.Fprintln(w, data) + require.NoError(t, err) })) defer ts.Close() @@ -215,7 +221,8 @@ go_gc_duration_seconds_count 42 func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleGaugeTextFormat) + _, err := fmt.Fprintln(w, sampleGaugeTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -234,3 +241,50 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } + +func TestUnsupportedFieldSelector(t *testing.T) { + fieldSelectorString := "spec.containerName=container" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + + fieldSelector, _ := 
fields.ParseSelector(prom.KubernetesFieldSelector) + isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector) + assert.Equal(t, false, isValid) + assert.Equal(t, "spec.containerName", invalidSelector) +} + +func TestInitConfigErrors(t *testing.T) { + p := &Prometheus{ + MetricVersion: 2, + Log: testutil.Logger{}, + URLs: nil, + URLTag: "url", + MonitorPods: true, + PodScrapeScope: "node", + PodScrapeInterval: 60, + } + + // Both invalid IP addresses + p.NodeIP = "10.240.0.0.0" + require.NoError(t, os.Setenv("NODE_IP", "10.000.0.0.0")) + err := p.Init() + require.Error(t, err) + expectedMessage := "the node_ip config and the environment variable NODE_IP are not set or invalid; cannot get pod list for monitor_kubernetes_pods using node scrape scope" + require.Equal(t, expectedMessage, err.Error()) + require.NoError(t, os.Setenv("NODE_IP", "10.000.0.0")) + + p.KubernetesLabelSelector = "label0==label0, label0 in (=)" + err = p.Init() + expectedMessage = "error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier" + require.Error(t, err, expectedMessage) + p.KubernetesLabelSelector = "label0==label" + + p.KubernetesFieldSelector = "field," + err = p.Init() + expectedMessage = "error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'" + require.Error(t, err, expectedMessage) + + p.KubernetesFieldSelector = "spec.containerNames=containerNames" + err = p.Init() + expectedMessage = "the field selector spec.containerNames is not supported for pods" + require.Error(t, err, expectedMessage) +} diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md index ac81633a3f461..db9f57e974d2d 100644 --- a/plugins/inputs/proxmox/README.md +++ b/plugins/inputs/proxmox/README.md @@ -11,6 +11,8 @@ Telegraf minimum version: Telegraf 1.16.0 ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. base_url = "https://localhost:8006/api2/json" api_token = "USER@REALM!TOKENID=UUID" + ## Node name, defaults to OS hostname + # node_name = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 13dcb4a95f304..101b458630eeb 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -2,19 +2,24 @@ package proxmox import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "io/ioutil" + "errors" + "io" "net/http" "net/url" "os" "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) var sampleConfig = ` ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. 
  base_url = "https://localhost:8006/api2/json"
  api_token = "USER@REALM!TOKENID=UUID"
+  ## Node name, defaults to OS hostname
+  # node_name = ""
 
   ## Optional TLS Config
   # tls_ca = "/etc/telegraf/ca.pem"
@@ -48,11 +53,11 @@ func (px *Proxmox) Gather(acc telegraf.Accumulator) error {
 }
 
 func (px *Proxmox) Init() error {
-	hostname, err := os.Hostname()
-	if err != nil {
-		return err
+	// Set hostname as default node name for backwards compatibility
+	if px.NodeName == "" {
+		hostname, _ := os.Hostname()
+		px.NodeName = hostname
 	}
-	px.hostname = hostname
 
 	tlsCfg, err := px.ClientConfig.TLSConfig()
 	if err != nil {
@@ -62,39 +67,43 @@
 		Transport: &http.Transport{
 			TLSClientConfig: tlsCfg,
 		},
-		Timeout: px.ResponseTimeout.Duration,
+		Timeout: time.Duration(px.ResponseTimeout),
 	}
 
 	return nil
 }
 
 func init() {
-	px := Proxmox{
-		requestFunction: performRequest,
-	}
-
-	inputs.Add("proxmox", func() telegraf.Input { return &px })
+	inputs.Add("proxmox", func() telegraf.Input {
+		return &Proxmox{
+			requestFunction: performRequest,
+		}
+	})
 }
 
 func getNodeSearchDomain(px *Proxmox) error {
-	apiUrl := "/nodes/" + px.hostname + "/dns"
-	jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
+	apiURL := "/nodes/" + px.NodeName + "/dns"
+	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
 	if err != nil {
 		return err
 	}
 
-	var nodeDns NodeDns
-	err = json.Unmarshal(jsonData, &nodeDns)
+	var nodeDNS NodeDNS
+	err = json.Unmarshal(jsonData, &nodeDNS)
 	if err != nil {
 		return err
 	}
-	px.nodeSearchDomain = nodeDns.Data.Searchdomain
+
+	if nodeDNS.Data.Searchdomain == "" {
+		return errors.New("search domain is not set")
+	}
+	px.nodeSearchDomain = nodeDNS.Data.Searchdomain
 
 	return nil
 }
 
-func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) {
-	request, err := http.NewRequest(method, px.BaseURL+apiUrl, strings.NewReader(data.Encode()))
+func performRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) {
+	request, err := http.NewRequest(method, px.BaseURL+apiURL, strings.NewReader(data.Encode()))
 	if err != nil {
 		return nil, err
 	}
@@ -106,7 +115,7 @@ func performRequest(px *Proxmox, apiUrl string, method string, data url.Values)
 	}
 	defer resp.Body.Close()
 
-	responseBody, err := ioutil.ReadAll(resp.Body)
+	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, err
 	}
@@ -115,15 +124,15 @@
 }
 
 func gatherLxcData(px *Proxmox, acc telegraf.Accumulator) {
-	gatherVmData(px, acc, LXC)
+	gatherVMData(px, acc, LXC)
 }
 
 func gatherQemuData(px *Proxmox, acc telegraf.Accumulator) {
-	gatherVmData(px, acc, QEMU)
+	gatherVMData(px, acc, QEMU)
 }
 
-func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
-	vmStats, err := getVmStats(px, rt)
+func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
+	vmStats, err := getVMStats(px, rt)
 	if err != nil {
 		px.Log.Error("Error getting VM stats: %v", err)
 		return
@@ -131,75 +140,100 @@
 
 	// For each VM add metrics to Accumulator
 	for _, vmStat := range vmStats.Data {
-		vmConfig, err := getVmConfig(px, vmStat.ID, rt)
+		vmConfig, err := getVMConfig(px, vmStat.ID, rt)
 		if err != nil {
-			px.Log.Error("Error getting VM config: %v", err)
+			px.Log.Errorf("Error getting VM config: %v", err)
 			return
 		}
+
+		if vmConfig.Data.Template == 1 {
+			px.Log.Debugf("Ignoring template VM %s (%s)", vmStat.ID, vmStat.Name)
+			continue
+		}
+
 		tags := getTags(px, vmStat.Name, vmConfig, rt)
-		fields, err := getFields(vmStat)
+		currentVMStatus, err := getCurrentVMStatus(px, rt, vmStat.ID)
 		if err != nil {
-			px.Log.Error("Error getting VM measurements: %v", err)
+			px.Log.Errorf("Error getting current VM status: %v", err)
 			return
 		}
+
+		fields := getFields(currentVMStatus)
 		acc.AddFields("proxmox", fields, tags)
 	}
 }
 
-func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) {
-	apiUrl := "/nodes/" + px.hostname + "/" + string(rt)
-	jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
+func getCurrentVMStatus(px *Proxmox, rt ResourceType, id json.Number) (VMStat, error) {
+	apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(id) + "/status/current"
+
+	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
+	if err != nil {
+		return VMStat{}, err
+	}
+
+	var currentVMStatus VMCurrentStats
+	err = json.Unmarshal(jsonData, &currentVMStatus)
 	if err != nil {
-		return VmStats{}, err
+		return VMStat{}, err
 	}
 
-	var vmStats VmStats
+	return currentVMStatus.Data, nil
+}
+
+func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) {
+	apiURL := "/nodes/" + px.NodeName + "/" + string(rt)
+	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
+	if err != nil {
+		return VMStats{}, err
+	}
+
+	var vmStats VMStats
 	err = json.Unmarshal(jsonData, &vmStats)
 	if err != nil {
-		return VmStats{}, err
+		return VMStats{}, err
 	}
 
 	return vmStats, nil
 }
 
-func getVmConfig(px *Proxmox, vmId string, rt ResourceType) (VmConfig, error) {
-	apiUrl := "/nodes/" + px.hostname + "/" + string(rt) + "/" + vmId + "/config"
-	jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
+func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, error) {
+	apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(vmID) + "/config"
+	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
 	if err != nil {
-		return VmConfig{}, err
+		return VMConfig{}, err
 	}
 
-	var vmConfig VmConfig
+	var vmConfig VMConfig
 	err = json.Unmarshal(jsonData, &vmConfig)
 	if err != nil {
-		return VmConfig{}, err
+		return VMConfig{}, err
 	}
 
 	return vmConfig, nil
 }
 
-func getFields(vmStat VmStat) (map[string]interface{}, error) {
-	mem_total, mem_used, mem_free, mem_used_percentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
-	swap_total, swap_used, swap_free, swap_used_percentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
-	disk_total, disk_used, disk_free, disk_used_percentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)
+func getFields(vmStat VMStat) map[string]interface{} {
+	memTotal, memUsed, memFree, memUsedPercentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
+	swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
	diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)
 
 	return map[string]interface{}{
 		"status": vmStat.Status,
 		"uptime": jsonNumberToInt64(vmStat.Uptime),
-		"cpuload": jsonNumberToFloat64(vmStat.CpuLoad),
-		"mem_used": mem_used,
-		"mem_total": mem_total,
-		"mem_free": mem_free,
-		"mem_used_percentage": mem_used_percentage,
-		"swap_used": swap_used,
-		"swap_total": swap_total,
-		"swap_free": swap_free,
-		"swap_used_percentage": swap_used_percentage,
-		"disk_used": disk_used,
-		"disk_total": disk_total,
-		"disk_free": disk_free,
-		"disk_used_percentage": disk_used_percentage,
-	}, nil
+		"cpuload":
jsonNumberToFloat64(vmStat.CPULoad), + "mem_used": memUsed, + "mem_total": memTotal, + "mem_free": memFree, + "mem_used_percentage": memUsedPercentage, + "swap_used": swapUsed, + "swap_total": swapTotal, + "swap_free": swapFree, + "swap_used_percentage": swapUsedPercentage, + "disk_used": diskUsed, + "disk_total": diskTotal, + "disk_free": diskFree, + "disk_used_percentage": diskUsedPercentage, + } } func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) { @@ -232,7 +266,7 @@ func jsonNumberToFloat64(value json.Number) float64 { return float64Value } -func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[string]string { +func getTags(px *Proxmox, name string, vmConfig VMConfig, rt ResourceType) map[string]string { domain := vmConfig.Data.Searchdomain if len(domain) == 0 { domain = px.nodeSearchDomain @@ -245,7 +279,7 @@ func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[s fqdn := hostname + "." + domain return map[string]string{ - "node_fqdn": px.hostname + "." + px.nodeSearchDomain, + "node_fqdn": px.NodeName + "." + px.nodeSearchDomain, "vm_name": name, "vm_fqdn": fqdn, "vm_type": string(rt), diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 274ebdf69ff28..741a272829474 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -1,33 +1,40 @@ package proxmox import ( - "github.com/bmizerany/assert" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "net/url" "strings" "testing" + + "github.com/bmizerany/assert" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}]}` var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}` -var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}` +var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"},{"vmid":112,"type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container2"}]}` var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}` +var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` +var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` -func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) { +func performTestRequest(_ *Proxmox, apiURL string, _ string, _ 
url.Values) ([]byte, error) { var bytedata = []byte("") - if strings.HasSuffix(apiUrl, "dns") { + if strings.HasSuffix(apiURL, "dns") { bytedata = []byte(nodeSearchDomainTestData) - } else if strings.HasSuffix(apiUrl, "qemu") { + } else if strings.HasSuffix(apiURL, "qemu") { bytedata = []byte(qemuTestData) - } else if strings.HasSuffix(apiUrl, "113/config") { + } else if strings.HasSuffix(apiURL, "113/config") { bytedata = []byte(qemuConfigTestData) - } else if strings.HasSuffix(apiUrl, "lxc") { + } else if strings.HasSuffix(apiURL, "lxc") { bytedata = []byte(lxcTestData) - } else if strings.HasSuffix(apiUrl, "111/config") { + } else if strings.HasSuffix(apiURL, "111/config") { bytedata = []byte(lxcConfigTestData) + } else if strings.HasSuffix(apiURL, "111/status/current") { + bytedata = []byte(lxcCurrentStatusTestData) + } else if strings.HasSuffix(apiURL, "113/status/current") { + bytedata = []byte(qemuCurrentStatusTestData) } return bytedata, nil @@ -36,12 +43,12 @@ func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Valu func setUp(t *testing.T) *Proxmox { px := &Proxmox{ requestFunction: performTestRequest, + NodeName: "testnode", } require.NoError(t, px.Init()) - // Override hostname and logger for test - px.hostname = "testnode" + // Override logger for test px.Log = testutil.Logger{} return px } diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index eef5dffff1f28..2f16841b2ff8b 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -2,20 +2,22 @@ package proxmox import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" "net/http" "net/url" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" ) type Proxmox struct { - BaseURL string `toml:"base_url"` - APIToken string `toml:"api_token"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + BaseURL string `toml:"base_url"` + APIToken string `toml:"api_token"` + ResponseTimeout config.Duration `toml:"response_timeout"` + NodeName string `toml:"node_name"` + tls.ClientConfig - hostname string httpClient *http.Client nodeSearchDomain string @@ -30,12 +32,16 @@ var ( LXC ResourceType = "lxc" ) -type VmStats struct { - Data []VmStat `json:"data"` +type VMStats struct { + Data []VMStat `json:"data"` +} + +type VMCurrentStats struct { + Data VMStat `json:"data"` } -type VmStat struct { - ID string `json:"vmid"` +type VMStat struct { + ID json.Number `json:"vmid"` Name string `json:"name"` Status string `json:"status"` UsedMem json.Number `json:"mem"` @@ -45,17 +51,18 @@ type VmStat struct { UsedSwap json.Number `json:"swap"` TotalSwap json.Number `json:"maxswap"` Uptime json.Number `json:"uptime"` - CpuLoad json.Number `json:"cpu"` + CPULoad json.Number `json:"cpu"` } -type VmConfig struct { +type VMConfig struct { Data struct { Searchdomain string `json:"searchdomain"` Hostname string `json:"hostname"` + Template int `json:"template"` } `json:"data"` } -type NodeDns struct { +type NodeDNS struct { Data struct { Searchdomain string `json:"search"` } `json:"data"` diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 1d0e30aa88ed5..9976012fe368c 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -2,12 +2,12 @@ package puppetagent import ( "fmt" - 
"gopkg.in/yaml.v2" - "io/ioutil" "os" "reflect" "strings" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -84,7 +84,6 @@ func (pa *PuppetAgent) Description() string { // Gather reads stats from all configured servers accumulates stats func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { - if len(pa.Location) == 0 { pa.Location = "/var/lib/puppet/state/last_run_summary.yaml" } @@ -93,7 +92,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("%s", err) } - fh, err := ioutil.ReadFile(pa.Location) + fh, err := os.ReadFile(pa.Location) if err != nil { return fmt.Errorf("%s", err) } diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go index b1c447887f23c..6ba769ac5dd37 100644 --- a/plugins/inputs/puppetagent/puppetagent_test.go +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -1,8 +1,10 @@ package puppetagent import ( - "github.com/influxdata/telegraf/testutil" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -11,7 +13,7 @@ func TestGather(t *testing.T) { pa := PuppetAgent{ Location: "last_run_summary.yaml", } - pa.Gather(&acc) + require.NoError(t, pa.Gather(&acc)) tags := map[string]string{"location": "last_run_summary.yaml"} fields := map[string]interface{}{ diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 1274b4ee230f8..5f106642adeb6 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -48,6 +48,12 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] + ## Metrics to include and exclude. Globs accepted. + ## Note that an empty array for both will include all metrics + ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" + # metric_include = [] + # metric_exclude = [] + ## Queues to include and exclude. Globs accepted. 
## Note that an empty array for both will include all queues # queue_name_include = [] diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 4d8050c33fbca..158b8d5ed6b21 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -3,14 +3,15 @@ package rabbitmq import ( "encoding/json" "fmt" + "io" "net/http" "strconv" "sync" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -40,22 +41,25 @@ type RabbitMQ struct { Password string `toml:"password"` tls.ClientConfig - ResponseHeaderTimeout internal.Duration `toml:"header_timeout"` - ClientTimeout internal.Duration `toml:"client_timeout"` + ResponseHeaderTimeout config.Duration `toml:"header_timeout"` + ClientTimeout config.Duration `toml:"client_timeout"` Nodes []string `toml:"nodes"` Queues []string `toml:"queues"` Exchanges []string `toml:"exchanges"` + MetricInclude []string `toml:"metric_include"` + MetricExclude []string `toml:"metric_exclude"` QueueInclude []string `toml:"queue_name_include"` QueueExclude []string `toml:"queue_name_exclude"` FederationUpstreamInclude []string `toml:"federation_upstream_include"` FederationUpstreamExclude []string `toml:"federation_upstream_exclude"` - Client *http.Client `toml:"-"` + Log telegraf.Logger `toml:"-"` - filterCreated bool + client *http.Client excludeEveryQueue bool + metricFilter filter.Filter queueFilter filter.Filter upstreamFilter filter.Filter } @@ -157,17 +161,17 @@ type Node struct { Uptime int64 `json:"uptime"` MnesiaDiskTxCount int64 `json:"mnesia_disk_tx_count"` MnesiaDiskTxCountDetails Details `json:"mnesia_disk_tx_count_details"` - MnesiaRamTxCount int64 `json:"mnesia_ram_tx_count"` - MnesiaRamTxCountDetails Details `json:"mnesia_ram_tx_count_details"` + MnesiaRAMTxCount int64 `json:"mnesia_ram_tx_count"` + MnesiaRAMTxCountDetails Details `json:"mnesia_ram_tx_count_details"` GcNum int64 `json:"gc_num"` GcNumDetails Details `json:"gc_num_details"` GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"` GcBytesReclaimedDetails Details `json:"gc_bytes_reclaimed_details"` - IoReadAvgTime int64 `json:"io_read_avg_time"` + IoReadAvgTime float64 `json:"io_read_avg_time"` IoReadAvgTimeDetails Details `json:"io_read_avg_time_details"` IoReadBytes int64 `json:"io_read_bytes"` IoReadBytesDetails Details `json:"io_read_bytes_details"` - IoWriteAvgTime int64 `json:"io_write_avg_time"` + IoWriteAvgTime float64 `json:"io_write_avg_time"` IoWriteAvgTimeDetails Details `json:"io_write_avg_time_details"` IoWriteBytes int64 `json:"io_write_bytes"` IoWriteBytesDetails Details `json:"io_write_bytes_details"` @@ -226,32 +230,44 @@ type MemoryResponse struct { // Memory details type Memory struct { - ConnectionReaders int64 `json:"connection_readers"` - ConnectionWriters int64 `json:"connection_writers"` - ConnectionChannels int64 `json:"connection_channels"` - ConnectionOther int64 `json:"connection_other"` - QueueProcs int64 `json:"queue_procs"` - QueueSlaveProcs int64 `json:"queue_slave_procs"` - Plugins int64 `json:"plugins"` - OtherProc int64 `json:"other_proc"` - Metrics int64 `json:"metrics"` - MgmtDb int64 `json:"mgmt_db"` - Mnesia int64 `json:"mnesia"` - OtherEts int64 `json:"other_ets"` - Binary int64 `json:"binary"` - MsgIndex int64 `json:"msg_index"` - Code int64 `json:"code"` - Atom int64 `json:"atom"` - 
OtherSystem int64 `json:"other_system"` - AllocatedUnused int64 `json:"allocated_unused"` - ReservedUnallocated int64 `json:"reserved_unallocated"` - Total int64 `json:"total"` + ConnectionReaders int64 `json:"connection_readers"` + ConnectionWriters int64 `json:"connection_writers"` + ConnectionChannels int64 `json:"connection_channels"` + ConnectionOther int64 `json:"connection_other"` + QueueProcs int64 `json:"queue_procs"` + QueueSlaveProcs int64 `json:"queue_slave_procs"` + Plugins int64 `json:"plugins"` + OtherProc int64 `json:"other_proc"` + Metrics int64 `json:"metrics"` + MgmtDb int64 `json:"mgmt_db"` + Mnesia int64 `json:"mnesia"` + OtherEts int64 `json:"other_ets"` + Binary int64 `json:"binary"` + MsgIndex int64 `json:"msg_index"` + Code int64 `json:"code"` + Atom int64 `json:"atom"` + OtherSystem int64 `json:"other_system"` + AllocatedUnused int64 `json:"allocated_unused"` + ReservedUnallocated int64 `json:"reserved_unallocated"` + Total interface{} `json:"total"` +} + +// Error response +type ErrorResponse struct { + Error string `json:"error"` + Reason string `json:"reason"` } // gatherFunc ... type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges, gatherFederationLinks} +var gatherFunctions = map[string]gatherFunc{ + "exchange": gatherExchanges, + "federation": gatherFederationLinks, + "node": gatherNodes, + "overview": gatherOverview, + "queue": gatherQueues, +} var sampleConfig = ` ## Management Plugin url. (default: http://localhost:15672) @@ -291,6 +307,12 @@ var sampleConfig = ` ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] + ## Metrics to include and exclude. Globs accepted. + ## Note that an empty array for both will include all metrics + ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" + # metric_include = [] + # metric_exclude = [] + ## Queues to include and exclude. Globs accepted. ## Note that an empty array for both will include all queues queue_name_include = [] @@ -323,39 +345,47 @@ func (r *RabbitMQ) Description() string { return "Reads metrics from RabbitMQ servers via the Management Plugin" } -// Gather ... 
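Memory.Total becomes interface{} above because the management API may report total memory either as a plain number or, on newer RabbitMQ versions, as a map of per-estimator values such as rss and allocated; the type switch later in this diff copes with both. A small sketch of the two JSON shapes being decoded; the payloads are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type memory struct {
	Total interface{} `json:"total"`
}

// totalBytes extracts a usable total from either response shape,
// in the same spirit as the switch added to gatherNodes.
func totalBytes(raw string) (int64, error) {
	var m memory
	if err := json.Unmarshal([]byte(raw), &m); err != nil {
		return 0, err
	}
	switch v := m.Total.(type) {
	case float64: // older servers: a plain number
		return int64(v), nil
	case map[string]interface{}: // newer servers: per-estimator breakdown
		for _, estimator := range []string{"rss", "allocated", "erlang"} {
			if x, ok := v[estimator].(float64); ok {
				return int64(x), nil
			}
		}
	}
	return 0, fmt.Errorf("no known memory estimation in %v", m.Total)
}

func main() {
	a, _ := totalBytes(`{"total": 83025920}`)
	b, _ := totalBytes(`{"total": {"rss": 83025920, "allocated": 90000000}}`)
	fmt.Println(a, b) // 83025920 83025920
}
```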
-func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { - if r.Client == nil { - tlsCfg, err := r.ClientConfig.TLSConfig() - if err != nil { - return err - } - tr := &http.Transport{ - ResponseHeaderTimeout: r.ResponseHeaderTimeout.Duration, - TLSClientConfig: tlsCfg, - } - r.Client = &http.Client{ - Transport: tr, - Timeout: r.ClientTimeout.Duration, - } +func (r *RabbitMQ) Init() error { + var err error + + // Create gather filters + if err := r.createQueueFilter(); err != nil { + return err + } + if err := r.createUpstreamFilter(); err != nil { + return err } - // Create gather filters if not already created - if !r.filterCreated { - err := r.createQueueFilter() - if err != nil { - return err - } - err = r.createUpstreamFilter() - if err != nil { - return err - } - r.filterCreated = true + // Create a filter for the metrics + if r.metricFilter, err = filter.NewIncludeExcludeFilter(r.MetricInclude, r.MetricExclude); err != nil { + return err } + tlsCfg, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(r.ResponseHeaderTimeout), + TLSClientConfig: tlsCfg, + } + r.client = &http.Client{ + Transport: tr, + Timeout: time.Duration(r.ClientTimeout), + } + + return nil +} + +// Gather ... +func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup - wg.Add(len(gatherFunctions)) - for _, f := range gatherFunctions { + for name, f := range gatherFunctions { + // Query only metrics that are supported + if !r.metricFilter.Match(name) { + continue + } + wg.Add(1) go func(gf gatherFunc) { defer wg.Done() gf(r, acc) @@ -366,15 +396,16 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { return nil } -func (r *RabbitMQ) requestJSON(u string, target interface{}) error { +func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { if r.URL == "" { r.URL = DefaultURL } - u = fmt.Sprintf("%s%s", r.URL, u) + endpoint := r.URL + u + r.Log.Debugf("Requesting %q...", endpoint) - req, err := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", endpoint, nil) if err != nil { - return err + return nil, err } username := r.Username @@ -389,14 +420,37 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error { req.SetBasicAuth(username, password) - resp, err := r.Client.Do(req) + resp, err := r.client.Do(req) if err != nil { - return err + return nil, err } - defer resp.Body.Close() - json.NewDecoder(resp.Body).Decode(target) + r.Log.Debugf("HTTP status code: %v %v", resp.StatusCode, http.StatusText(resp.StatusCode)) + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return io.ReadAll(resp.Body) +} + +func (r *RabbitMQ) requestJSON(u string, target interface{}) error { + buf, err := r.requestEndpoint(u) + if err != nil { + return err + } + if err := json.Unmarshal(buf, target); err != nil { + if _, ok := err.(*json.UnmarshalTypeError); ok { + // Try to get the error reason from the response + var errResponse ErrorResponse + if json.Unmarshal(buf, &errResponse) == nil && errResponse.Error != "" { + // Return the error reason in the response + return fmt.Errorf("error response trying to get %q: %q (reason: %q)", u, errResponse.Error, errResponse.Reason) + } + } + + return fmt.Errorf("decoding answer from %q failed: %v", u, err) + } return nil } @@ -491,8 +545,8 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { "uptime": node.Uptime, 
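The per-endpoint gating introduced in Init and Gather above relies on Telegraf's filter package: an empty include list matches everything, and excludes are applied on top of the includes. A short sketch of those semantics; the include list shown is illustrative:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Accept only node and queue metrics, mirroring what
	// metric_include = ["node", "queue"] would configure.
	f, err := filter.NewIncludeExcludeFilter([]string{"node", "queue"}, nil)
	if err != nil {
		panic(err)
	}
	for _, name := range []string{"exchange", "federation", "node", "overview", "queue"} {
		fmt.Println(name, f.Match(name)) // only "node" and "queue" print true
	}
}
```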
"mnesia_disk_tx_count": node.MnesiaDiskTxCount, "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, - "mnesia_ram_tx_count": node.MnesiaRamTxCount, - "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, + "mnesia_ram_tx_count": node.MnesiaRAMTxCount, + "mnesia_ram_tx_count_rate": node.MnesiaRAMTxCountDetails.Rate, "gc_num": node.GcNum, "gc_num_rate": node.GcNumDetails.Rate, "gc_bytes_reclaimed": node.GcBytesReclaimed, @@ -535,7 +589,27 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { fields["mem_other_system"] = memory.Memory.OtherSystem fields["mem_allocated_unused"] = memory.Memory.AllocatedUnused fields["mem_reserved_unallocated"] = memory.Memory.ReservedUnallocated - fields["mem_total"] = memory.Memory.Total + switch v := memory.Memory.Total.(type) { + case float64: + fields["mem_total"] = int64(v) + case map[string]interface{}: + var foundEstimator bool + for _, estimator := range []string{"rss", "allocated", "erlang"} { + if x, found := v[estimator]; found { + if total, ok := x.(float64); ok { + fields["mem_total"] = int64(total) + foundEstimator = true + break + } + acc.AddError(fmt.Errorf("unknown type %T for %q total memory", x, estimator)) + } + } + if !foundEstimator { + acc.AddError(fmt.Errorf("no known memory estimation in %v", v)) + } + default: + acc.AddError(fmt.Errorf("unknown type %T for total memory", memory.Memory.Total)) + } } acc.AddFields("rabbitmq_node", fields, tags) @@ -764,8 +838,8 @@ func (r *RabbitMQ) shouldGatherFederationLink(link FederationLink) bool { func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ - ResponseHeaderTimeout: internal.Duration{Duration: DefaultResponseHeaderTimeout * time.Second}, - ClientTimeout: internal.Duration{Duration: DefaultClientTimeout * time.Second}, + ResponseHeaderTimeout: config.Duration(DefaultResponseHeaderTimeout * time.Second), + ClientTimeout: config.Duration(DefaultClientTimeout * time.Second), } }) } diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 869e8036d157d..e867b1e2dcb61 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -4,195 +4,668 @@ import ( "fmt" "net/http" "net/http/httptest" - "testing" + "os" + "time" - "io/ioutil" + "testing" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestRabbitMQGeneratesMetrics(t *testing.T) { +func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var jsonFilePath string switch r.URL.Path { case "/api/overview": - jsonFilePath = "testdata/overview.json" + jsonFilePath = "testdata/set1/overview.json" case "/api/nodes": - jsonFilePath = "testdata/nodes.json" + jsonFilePath = "testdata/set1/nodes.json" case "/api/queues": - jsonFilePath = "testdata/queues.json" + jsonFilePath = "testdata/set1/queues.json" case "/api/exchanges": - jsonFilePath = "testdata/exchanges.json" + jsonFilePath = "testdata/set1/exchanges.json" case "/api/federation-links": - jsonFilePath = "testdata/federation-links.json" + jsonFilePath = "testdata/set1/federation-links.json" case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": - jsonFilePath = "testdata/memory.json" + jsonFilePath = "testdata/set1/memory.json" default: - panic("Cannot handle request") + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + 
return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - - w.Write(data) + _, err = w.Write(data) + require.NoError(t, err) })) defer ts.Close() - r := &RabbitMQ{ + // Define test cases + expected := []telegraf.Metric{ + testutil.MustMetric("rabbitmq_overview", + map[string]string{ + "url": ts.URL, + }, + map[string]interface{}{ + "messages": int64(5), + "messages_ready": int64(32), + "messages_unacked": int64(27), + "messages_acked": int64(5246), + "messages_delivered": int64(5234), + "messages_delivered_get": int64(3333), + "messages_published": int64(5258), + "channels": int64(44), + "connections": int64(44), + "consumers": int64(65), + "exchanges": int64(43), + "queues": int64(62), + "clustering_listeners": int64(2), + "amqp_listeners": int64(2), + "return_unroutable": int64(10), + "return_unroutable_rate": float64(3.3), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqlocal-0.rmqlocal.ankorabbitstatefulset3.svc.cluster.local", + "queue": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "url": ts.URL, + "vhost": "sorandomsorandom", + }, + map[string]interface{}{ + "consumers": int64(3), + "consumer_utilisation": float64(1.0), + "memory": int64(143776), + "message_bytes": int64(3), + "message_bytes_ready": int64(4), + "message_bytes_unacked": int64(5), + "message_bytes_ram": int64(6), + "message_bytes_persist": int64(7), + "messages": int64(44), + "messages_ready": int64(32), + "messages_unack": int64(44), + "messages_ack": int64(3457), + "messages_ack_rate": float64(9.9), + "messages_deliver": int64(22222), + "messages_deliver_rate": float64(333.4), + "messages_deliver_get": int64(3457), + "messages_deliver_get_rate": float64(0.2), + "messages_publish": int64(3457), + "messages_publish_rate": float64(11.2), + "messages_redeliver": int64(33), + "messages_redeliver_rate": float64(2.5), + "idle_since": "2015-11-01 8:22:14", + "slave_nodes": int64(1), + "synchronised_slave_nodes": int64(1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_node", + map[string]string{ + "node": "rabbit@vagrant-ubuntu-trusty-64", + "url": ts.URL, + }, + map[string]interface{}{ + "disk_free": int64(3776), + "disk_free_limit": int64(50000000), + "disk_free_alarm": int64(0), + "fd_total": int64(1024), + "fd_used": int64(63), + "mem_limit": int64(2503), + "mem_used": int64(159707080), + "mem_alarm": int64(1), + "proc_total": int64(1048576), + "proc_used": int64(783), + "run_queue": int64(0), + "sockets_total": int64(829), + "sockets_used": int64(45), + "uptime": int64(7464827), + "running": int64(1), + "mnesia_disk_tx_count": int64(16), + "mnesia_ram_tx_count": int64(296), + "mnesia_disk_tx_count_rate": float64(1.1), + "mnesia_ram_tx_count_rate": float64(2.2), + "gc_num": int64(57280132), + "gc_bytes_reclaimed": int64(2533), + "gc_num_rate": float64(274.2), + "gc_bytes_reclaimed_rate": float64(16490856.3), + "io_read_avg_time": float64(983.0), + "io_read_avg_time_rate": float64(88.77), + "io_read_bytes": int64(1111), + "io_read_bytes_rate": float64(99.99), + "io_write_avg_time": float64(134.0), + "io_write_avg_time_rate": float64(4.32), + "io_write_bytes": int64(823), + "io_write_bytes_rate": float64(32.8), + "mem_connection_readers": int64(1234), + "mem_connection_writers": int64(5678), + 
"mem_connection_channels": int64(1133), + "mem_connection_other": int64(2840), + "mem_queue_procs": int64(2840), + "mem_queue_slave_procs": int64(0), + "mem_plugins": int64(1755976), + "mem_other_proc": int64(23056584), + "mem_metrics": int64(196536), + "mem_mgmt_db": int64(491272), + "mem_mnesia": int64(115600), + "mem_other_ets": int64(2121872), + "mem_binary": int64(418848), + "mem_msg_index": int64(42848), + "mem_code": int64(25179322), + "mem_atom": int64(1041593), + "mem_other_system": int64(14741981), + "mem_allocated_unused": int64(38208528), + "mem_reserved_unallocated": int64(0), + "mem_total": int64(83025920), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "true", + "durable": "false", + "exchange": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "sorandomsorandom", + }, + map[string]interface{}{ + "messages_publish_in": int64(3678), + "messages_publish_in_rate": float64(3.2), + "messages_publish_out": int64(3677), + "messages_publish_out_rate": float64(5.1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_federation", + map[string]string{ + "queue": "exampleLocalQueue", + "type": "queue", + "upstream": "ExampleFederationUpstream", + "upstream_queue": "exampleUpstreamQueue", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "acks_uncommitted": int64(1), + "consumers": int64(2), + "messages_unacknowledged": int64(3), + "messages_uncommitted": int64(4), + "messages_unconfirmed": int64(5), + "messages_confirm": int64(67), + "messages_publish": int64(890), + "messages_return_unroutable": int64(1), + }, + time.Unix(0, 0), + ), + } + + // Run the test + plugin := &RabbitMQ{ URL: ts.URL, + Log: testutil.Logger{}, } + require.NoError(t, plugin.Init()) acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) - err := acc.GatherError(r.Gather) - require.NoError(t, err) - - overviewMetrics := map[string]interface{}{ - "messages": 5, - "messages_ready": 32, - "messages_unacked": 27, - "messages_acked": 5246, - "messages_delivered": 5234, - "messages_delivered_get": 3333, - "messages_published": 5258, - "channels": 44, - "connections": 44, - "consumers": 65, - "exchanges": 43, - "queues": 62, - "clustering_listeners": 2, - "amqp_listeners": 2, - "return_unroutable": 10, - "return_unroutable_rate": 3.3, - } - compareMetrics(t, overviewMetrics, acc, "rabbitmq_overview") - - queuesMetrics := map[string]interface{}{ - "consumers": 3, - "consumer_utilisation": 1.0, - "memory": 143776, - "message_bytes": 3, - "message_bytes_ready": 4, - "message_bytes_unacked": 5, - "message_bytes_ram": 6, - "message_bytes_persist": 7, - "messages": 44, - "messages_ready": 32, - "messages_unack": 44, - "messages_ack": 3457, - "messages_ack_rate": 9.9, - "messages_deliver": 22222, - "messages_deliver_rate": 333.4, - "messages_deliver_get": 3457, - "messages_deliver_get_rate": 0.2, - "messages_publish": 3457, - "messages_publish_rate": 11.2, - "messages_redeliver": 33, - "messages_redeliver_rate": 2.5, - "idle_since": "2015-11-01 8:22:14", - "slave_nodes": 1, - "synchronised_slave_nodes": 1, + acc.Wait(len(expected)) + require.Len(t, acc.Errors, 0) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + 
switch r.URL.Path { + case "/api/overview": + jsonFilePath = "testdata/set2/overview.json" + case "/api/nodes": + jsonFilePath = "testdata/set2/nodes.json" + case "/api/queues": + jsonFilePath = "testdata/set2/queues.json" + case "/api/exchanges": + jsonFilePath = "testdata/set2/exchanges.json" + case "/api/federation-links": + jsonFilePath = "testdata/set2/federation-links.json" + case "/api/nodes/rabbit@rmqserver/memory": + jsonFilePath = "testdata/set2/memory.json" + default: + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + return + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + _, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + // Define test cases + expected := []telegraf.Metric{ + testutil.MustMetric("rabbitmq_overview", + map[string]string{ + "url": ts.URL, + }, + map[string]interface{}{ + "messages": int64(30), + "messages_ready": int64(30), + "messages_unacked": int64(0), + "messages_acked": int64(3736443), + "messages_delivered": int64(3736446), + "messages_delivered_get": int64(3736446), + "messages_published": int64(770025), + "channels": int64(43), + "connections": int64(43), + "consumers": int64(37), + "exchanges": int64(8), + "queues": int64(34), + "clustering_listeners": int64(1), + "amqp_listeners": int64(2), + "return_unroutable": int64(0), + "return_unroutable_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2caf-63e5-41e3-c15a-ba8fa11434b2", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15840), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(180), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(180), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(180), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(180), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:14", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2cb4-aa2d-c08b-457a-62d0893523a1", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15600), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(177), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(177), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(177), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(177), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + 
"messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:14", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2cb5-3820-e01b-6e20-ba29d5553fc3", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15584), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(175), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(175), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(175), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(175), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:15", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_node", + map[string]string{ + "node": "rabbit@rmqserver", + "url": ts.URL, + }, + map[string]interface{}{ + "disk_free": int64(25086496768), + "disk_free_limit": int64(50000000), + "disk_free_alarm": int64(0), + "fd_total": int64(65536), + "fd_used": int64(78), + "mem_limit": int64(1717546188), + "mem_used": int64(387645440), + "mem_alarm": int64(0), + "proc_total": int64(1048576), + "proc_used": int64(1128), + "run_queue": int64(1), + "sockets_total": int64(58893), + "sockets_used": int64(43), + "uptime": int64(4150152129), + "running": int64(1), + "mnesia_disk_tx_count": int64(103), + "mnesia_ram_tx_count": int64(2257), + "mnesia_disk_tx_count_rate": float64(0.0), + "mnesia_ram_tx_count_rate": float64(0.0), + "gc_num": int64(329526389), + "gc_bytes_reclaimed": int64(13660012170840), + "gc_num_rate": float64(125.2), + "gc_bytes_reclaimed_rate": float64(6583379.2), + "io_read_avg_time": float64(0.0), + "io_read_avg_time_rate": float64(0.0), + "io_read_bytes": int64(1), + "io_read_bytes_rate": float64(0.0), + "io_write_avg_time": float64(0.0), + "io_write_avg_time_rate": float64(0.0), + "io_write_bytes": int64(193066), + "io_write_bytes_rate": float64(0.0), + "mem_connection_readers": int64(1246768), + "mem_connection_writers": int64(72108), + "mem_connection_channels": int64(308588), + "mem_connection_other": int64(4883596), + "mem_queue_procs": int64(780996), + "mem_queue_slave_procs": int64(0), + "mem_plugins": int64(11932828), + "mem_other_proc": int64(39203520), + "mem_metrics": int64(626932), + "mem_mgmt_db": int64(3341264), + "mem_mnesia": int64(396016), + "mem_other_ets": int64(3771384), + "mem_binary": int64(209324208), + "mem_msg_index": int64(32648), + "mem_code": int64(32810827), + "mem_atom": int64(1458513), + "mem_other_system": int64(14284124), + "mem_allocated_unused": int64(61026048), + "mem_reserved_unallocated": int64(0), + "mem_total": int64(385548288), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(284725), + 
"messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(284572), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.direct", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.fanout", + "internal": "false", + "type": "fanout", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.headers", + "internal": "false", + "type": "headers", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.match", + "internal": "false", + "type": "headers", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.rabbitmq.trace", + "internal": "true", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.topic", + "internal": "false", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "true", + "durable": "false", + "exchange": "Exchange", + "internal": "false", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(18006), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(60798), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), } - compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue") - - nodeMetrics := map[string]interface{}{ - "disk_free": 3776, - "disk_free_limit": 50000000, - "disk_free_alarm": 0, - "fd_total": 1024, - "fd_used": 63, - "mem_limit": 2503, - "mem_used": 159707080, - "mem_alarm": 1, - "proc_total": 1048576, - "proc_used": 783, - "run_queue": 
 0,
-		"sockets_total":             829,
-		"sockets_used":              45,
-		"uptime":                    7464827,
-		"running":                   1,
-		"mnesia_disk_tx_count":      16,
-		"mnesia_ram_tx_count":       296,
-		"mnesia_disk_tx_count_rate": 1.1,
-		"mnesia_ram_tx_count_rate":  2.2,
-		"gc_num":                    57280132,
-		"gc_bytes_reclaimed":        2533,
-		"gc_num_rate":               274.2,
-		"gc_bytes_reclaimed_rate":   16490856.3,
-		"io_read_avg_time":          983,
-		"io_read_avg_time_rate":     88.77,
-		"io_read_bytes":             1111,
-		"io_read_bytes_rate":        99.99,
-		"io_write_avg_time":         134,
-		"io_write_avg_time_rate":    4.32,
-		"io_write_bytes":            823,
-		"io_write_bytes_rate":       32.8,
-		"mem_connection_readers":    1234,
-		"mem_connection_writers":    5678,
-		"mem_connection_channels":   1133,
-		"mem_connection_other":      2840,
-		"mem_queue_procs":           2840,
-		"mem_queue_slave_procs":     0,
-		"mem_plugins":               1755976,
-		"mem_other_proc":            23056584,
-		"mem_metrics":               196536,
-		"mem_mgmt_db":               491272,
-		"mem_mnesia":                115600,
-		"mem_other_ets":             2121872,
-		"mem_binary":                418848,
-		"mem_msg_index":             42848,
-		"mem_code":                  25179322,
-		"mem_atom":                  1041593,
-		"mem_other_system":          14741981,
-		"mem_allocated_unused":      38208528,
-		"mem_reserved_unallocated":  0,
-		"mem_total":                 83025920,
+	expectedErrors := []error{
+		fmt.Errorf("error response trying to get \"/api/federation-links\": \"Object Not Found\" (reason: \"Not Found\")"),
 	}
-	compareMetrics(t, nodeMetrics, acc, "rabbitmq_node")
 
-	exchangeMetrics := map[string]interface{}{
-		"messages_publish_in":       3678,
-		"messages_publish_in_rate":  3.2,
-		"messages_publish_out":      3677,
-		"messages_publish_out_rate": 5.1,
-	}
-	compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange")
-
-	federationLinkMetrics := map[string]interface{}{
-		"acks_uncommitted":           1,
-		"consumers":                  2,
-		"messages_unacknowledged":    3,
-		"messages_uncommitted":       4,
-		"messages_unconfirmed":       5,
-		"messages_confirm":           67,
-		"messages_publish":           890,
-		"messages_return_unroutable": 1,
+	// Run the test
+	plugin := &RabbitMQ{
+		URL: ts.URL,
+		Log: testutil.Logger{},
 	}
-	compareMetrics(t, federationLinkMetrics, acc, "rabbitmq_federation")
+	require.NoError(t, plugin.Init())
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, plugin.Gather(acc))
+
+	acc.Wait(len(expected))
+	require.Len(t, acc.Errors, len(expectedErrors))
+	require.ElementsMatch(t, expectedErrors, acc.Errors)
+
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), testutil.SortMetrics())
 }
 
-func compareMetrics(t *testing.T, expectedMetrics map[string]interface{},
-	accumulator *testutil.Accumulator, measurementKey string) {
-	measurement, exist := accumulator.Get(measurementKey)
+func TestRabbitMQMetricFilters(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound)
+	}))
+	defer ts.Close()
+
+	metricErrors := map[string]error{
+		"exchange":   fmt.Errorf("getting \"/api/exchanges\" failed: 404 Not Found"),
+		"federation": fmt.Errorf("getting \"/api/federation-links\" failed: 404 Not Found"),
+		"node":       fmt.Errorf("getting \"/api/nodes\" failed: 404 Not Found"),
+		"overview":   fmt.Errorf("getting \"/api/overview\" failed: 404 Not Found"),
+		"queue":      fmt.Errorf("getting \"/api/queues\" failed: 404 Not Found"),
+	}
 
-	assert.True(t, exist, "There is measurement %s", measurementKey)
-	assert.Equal(t, len(expectedMetrics), len(measurement.Fields))
+	// Include test
+	for name, expected := range metricErrors {
+		plugin := &RabbitMQ{
+			URL:           ts.URL,
+			Log:           testutil.Logger{},
+			MetricInclude: []string{name},
+		}
+
require.NoError(t, plugin.Init()) - for metricName, metricValue := range expectedMetrics { - actualMetricValue := measurement.Fields[metricName] + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + require.Len(t, acc.Errors, 1) + require.ElementsMatch(t, []error{expected}, acc.Errors) + } - if accumulator.HasStringField(measurementKey, metricName) { - assert.Equal(t, metricValue, actualMetricValue, - "Metric name: %s", metricName) - } else { - assert.InDelta(t, metricValue, actualMetricValue, 0e5, - "Metric name: %s", metricName) + // Exclude test + for name := range metricErrors { + // Exclude the current metric error from the list of expected errors + var expected []error + for n, e := range metricErrors { + if n != name { + expected = append(expected, e) + } } + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, + MetricExclude: []string{name}, + } + require.NoError(t, plugin.Init()) + + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + require.Len(t, acc.Errors, len(expected)) + require.ElementsMatch(t, expected, acc.Errors) } } diff --git a/plugins/inputs/rabbitmq/testdata/exchanges.json b/plugins/inputs/rabbitmq/testdata/set1/exchanges.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/exchanges.json rename to plugins/inputs/rabbitmq/testdata/set1/exchanges.json diff --git a/plugins/inputs/rabbitmq/testdata/federation-links.json b/plugins/inputs/rabbitmq/testdata/set1/federation-links.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/federation-links.json rename to plugins/inputs/rabbitmq/testdata/set1/federation-links.json diff --git a/plugins/inputs/rabbitmq/testdata/memory.json b/plugins/inputs/rabbitmq/testdata/set1/memory.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/memory.json rename to plugins/inputs/rabbitmq/testdata/set1/memory.json diff --git a/plugins/inputs/rabbitmq/testdata/nodes.json b/plugins/inputs/rabbitmq/testdata/set1/nodes.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/nodes.json rename to plugins/inputs/rabbitmq/testdata/set1/nodes.json diff --git a/plugins/inputs/rabbitmq/testdata/overview.json b/plugins/inputs/rabbitmq/testdata/set1/overview.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/overview.json rename to plugins/inputs/rabbitmq/testdata/set1/overview.json diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/set1/queues.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/queues.json rename to plugins/inputs/rabbitmq/testdata/set1/queues.json diff --git a/plugins/inputs/rabbitmq/testdata/set2/exchanges.json b/plugins/inputs/rabbitmq/testdata/set2/exchanges.json new file mode 100644 index 0000000000000..df47fe44bbd7f --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/exchanges.json @@ -0,0 +1,104 @@ +[ + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "message_stats": { + "publish_in": 284725, + "publish_in_details": { + "rate": 0 + }, + "publish_out": 284572, + "publish_out_details": { + "rate": 0 + } + }, + "name": "", + "type": "direct", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": true, + "durable": false, + "internal": false, + "message_stats": { + "publish_in": 18006, + "publish_in_details": { + "rate": 0 + }, + "publish_out": 60798, + "publish_out_details": { + "rate": 0 + } + }, + 
"name": "Exchange", + "type": "topic", + "user_who_performed_action": "user", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.direct", + "type": "direct", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.fanout", + "type": "fanout", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.headers", + "type": "headers", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.match", + "type": "headers", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": true, + "name": "amq.rabbitmq.trace", + "type": "topic", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.topic", + "type": "topic", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + } +] diff --git a/plugins/inputs/rabbitmq/testdata/set2/federation-links.json b/plugins/inputs/rabbitmq/testdata/set2/federation-links.json new file mode 100644 index 0000000000000..0d121cb2f3e64 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/federation-links.json @@ -0,0 +1 @@ +{"error":"Object Not Found","reason":"Not Found"} diff --git a/plugins/inputs/rabbitmq/testdata/set2/memory.json b/plugins/inputs/rabbitmq/testdata/set2/memory.json new file mode 100644 index 0000000000000..d18558ae21e5a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/memory.json @@ -0,0 +1,31 @@ +{ + "memory": { + "connection_readers": 1246768, + "connection_writers": 72108, + "connection_channels": 308588, + "connection_other": 4883596, + "queue_procs": 780996, + "queue_slave_procs": 0, + "quorum_queue_procs": 0, + "plugins": 11932828, + "other_proc": 39203520, + "metrics": 626932, + "mgmt_db": 3341264, + "mnesia": 396016, + "quorum_ets": 47920, + "other_ets": 3771384, + "binary": 209324208, + "msg_index": 32648, + "code": 32810827, + "atom": 1458513, + "other_system": 14284124, + "allocated_unused": 61026048, + "reserved_unallocated": 0, + "strategy": "rss", + "total": { + "erlang": 324522240, + "rss": 385548288, + "allocated": 385548288 + } + } +} diff --git a/plugins/inputs/rabbitmq/testdata/set2/nodes.json b/plugins/inputs/rabbitmq/testdata/set2/nodes.json new file mode 100644 index 0000000000000..6dcfb0d514efd --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/nodes.json @@ -0,0 +1,417 @@ +[ + { + "partitions": [], + "os_pid": "8268", + "fd_total": 65536, + "sockets_total": 58893, + "mem_limit": 1717546188, + "mem_alarm": false, + "disk_free_limit": 50000000, + "disk_free_alarm": false, + "proc_total": 1048576, + "rates_mode": "basic", + "uptime": 4150152129, + "run_queue": 1, + "processors": 4, + "exchange_types": [ + { + "name": "topic", + "description": "AMQP topic exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "fanout", + "description": "AMQP fanout exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "direct", + "description": "AMQP direct exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "headers", + "description": "AMQP 
headers exchange, as per the AMQP specification", + "enabled": true + } + ], + "auth_mechanisms": [ + { + "name": "PLAIN", + "description": "SASL PLAIN authentication mechanism", + "enabled": true + }, + { + "name": "AMQPLAIN", + "description": "QPid AMQPLAIN mechanism", + "enabled": true + }, + { + "name": "RABBIT-CR-DEMO", + "description": "RabbitMQ Demo challenge-response authentication mechanism", + "enabled": false + } + ], + "applications": [ + { + "name": "amqp_client", + "description": "RabbitMQ AMQP Client", + "version": "3.8.14" + }, + { + "name": "asn1", + "description": "The Erlang ASN1 compiler version 5.0.14", + "version": "5.0.14" + }, + { + "name": "aten", + "description": "Erlang node failure detector", + "version": "0.5.5" + }, + { + "name": "compiler", + "description": "ERTS CXC 138 10", + "version": "7.6.6" + }, + { + "name": "cowboy", + "description": "Small, fast, modern HTTP server.", + "version": "2.8.0" + }, + { + "name": "cowlib", + "description": "Support library for manipulating Web protocols.", + "version": "2.9.1" + }, + { + "name": "credentials_obfuscation", + "description": "Helper library that obfuscates sensitive values in process state", + "version": "2.4.0" + }, + { + "name": "crypto", + "description": "CRYPTO", + "version": "4.8.3" + }, + { + "name": "cuttlefish", + "description": "cuttlefish configuration abstraction", + "version": "2.6.0" + }, + { + "name": "gen_batch_server", + "description": "Generic batching server", + "version": "0.8.4" + }, + { + "name": "goldrush", + "description": "Erlang event stream processor", + "version": "0.1.9" + }, + { + "name": "inets", + "description": "INETS CXC 138 49", + "version": "7.3.2" + }, + { + "name": "jsx", + "description": "a streaming, evented json parsing toolkit", + "version": "2.11.0" + }, + { + "name": "kernel", + "description": "ERTS CXC 138 10", + "version": "7.2.1" + }, + { + "name": "lager", + "description": "Erlang logging framework", + "version": "3.8.2" + }, + { + "name": "mnesia", + "description": "MNESIA CXC 138 12", + "version": "4.18.1" + }, + { + "name": "observer_cli", + "description": "Visualize Erlang Nodes On The Command Line", + "version": "1.6.1" + }, + { + "name": "os_mon", + "description": "CPO CXC 138 46", + "version": "2.6.1" + }, + { + "name": "public_key", + "description": "Public key infrastructure", + "version": "1.9.2" + }, + { + "name": "ra", + "description": "Raft library", + "version": "1.1.8" + }, + { + "name": "rabbit", + "description": "RabbitMQ", + "version": "3.8.14" + }, + { + "name": "rabbit_common", + "description": "Modules shared by rabbitmq-server and rabbitmq-erlang-client", + "version": "3.8.14" + }, + { + "name": "rabbitmq_management", + "description": "RabbitMQ Management Console", + "version": "3.8.14" + }, + { + "name": "rabbitmq_management_agent", + "description": "RabbitMQ Management Agent", + "version": "3.8.14" + }, + { + "name": "rabbitmq_prelaunch", + "description": "RabbitMQ prelaunch setup", + "version": "3.8.14" + }, + { + "name": "rabbitmq_web_dispatch", + "description": "RabbitMQ Web Dispatcher", + "version": "3.8.14" + }, + { + "name": "ranch", + "description": "Socket acceptor pool for TCP protocols.", + "version": "1.7.1" + }, + { + "name": "recon", + "description": "Diagnostic tools for production use", + "version": "2.5.1" + }, + { + "name": "sasl", + "description": "SASL CXC 138 11", + "version": "4.0.1" + }, + { + "name": "ssl", + "description": "Erlang/OTP SSL application", + "version": "10.2.4" + }, + { + "name": "stdlib", + 
"description": "ERTS CXC 138 10", + "version": "3.14" + }, + { + "name": "stdout_formatter", + "description": "Tools to format paragraphs, lists and tables as plain text", + "version": "0.2.4" + }, + { + "name": "syntax_tools", + "description": "Syntax tools", + "version": "2.4" + }, + { + "name": "sysmon_handler", + "description": "Rate-limiting system_monitor event handler", + "version": "1.3.0" + }, + { + "name": "tools", + "description": "DEVTOOLS CXC 138 16", + "version": "3.4.3" + }, + { + "name": "xmerl", + "description": "XML parser", + "version": "1.3.26" + } + ], + "contexts": [ + { + "description": "RabbitMQ Management", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "port": "15672" + } + ], + "log_files": [ + "c:/Users/user/AppData/Roaming/RabbitMQ/log/rabbit@rmqserver.log", + "c:/Users/user/AppData/Roaming/RabbitMQ/log/rabbit@rmqserver_upgrade.log" + ], + "db_dir": "c:/Users/user/AppData/Roaming/RabbitMQ/db/rabbit@rmqserver-mnesia", + "config_files": [ + "c:/Users/user/AppData/Roaming/RabbitMQ/advanced.config" + ], + "net_ticktime": 60, + "enabled_plugins": [ + "rabbitmq_management" + ], + "mem_calculation_strategy": "rss", + "ra_open_file_metrics": { + "ra_log_wal": 1, + "ra_log_segment_writer": 0 + }, + "name": "rabbit@rmqserver", + "type": "disc", + "running": true, + "mem_used": 387645440, + "mem_used_details": { + "rate": 419430.4 + }, + "fd_used": 78, + "fd_used_details": { + "rate": 0 + }, + "sockets_used": 43, + "sockets_used_details": { + "rate": 0 + }, + "proc_used": 1128, + "proc_used_details": { + "rate": 0 + }, + "disk_free": 25086496768, + "disk_free_details": { + "rate": -118784 + }, + "gc_num": 329526389, + "gc_num_details": { + "rate": 125.2 + }, + "gc_bytes_reclaimed": 13660012170840, + "gc_bytes_reclaimed_details": { + "rate": 6583379.2 + }, + "context_switches": 974149754, + "context_switches_details": { + "rate": 270 + }, + "io_read_count": 1, + "io_read_count_details": { + "rate": 0 + }, + "io_read_bytes": 1, + "io_read_bytes_details": { + "rate": 0 + }, + "io_read_avg_time": 0, + "io_read_avg_time_details": { + "rate": 0 + }, + "io_write_count": 45, + "io_write_count_details": { + "rate": 0 + }, + "io_write_bytes": 193066, + "io_write_bytes_details": { + "rate": 0 + }, + "io_write_avg_time": 0, + "io_write_avg_time_details": { + "rate": 0 + }, + "io_sync_count": 45, + "io_sync_count_details": { + "rate": 0 + }, + "io_sync_avg_time": 0, + "io_sync_avg_time_details": { + "rate": 0 + }, + "io_seek_count": 31, + "io_seek_count_details": { + "rate": 0 + }, + "io_seek_avg_time": 0, + "io_seek_avg_time_details": { + "rate": 0 + }, + "io_reopen_count": 0, + "io_reopen_count_details": { + "rate": 0 + }, + "mnesia_ram_tx_count": 2257, + "mnesia_ram_tx_count_details": { + "rate": 0 + }, + "mnesia_disk_tx_count": 103, + "mnesia_disk_tx_count_details": { + "rate": 0 + }, + "msg_store_read_count": 0, + "msg_store_read_count_details": { + "rate": 0 + }, + "msg_store_write_count": 1, + "msg_store_write_count_details": { + "rate": 0 + }, + "queue_index_journal_write_count": 165, + "queue_index_journal_write_count_details": { + "rate": 0 + }, + "queue_index_write_count": 0, + "queue_index_write_count_details": { + "rate": 0 + }, + "queue_index_read_count": 0, + "queue_index_read_count_details": { + "rate": 0 + }, + "io_file_handle_open_attempt_count": 882, + "io_file_handle_open_attempt_count_details": { + "rate": 0 + }, + "io_file_handle_open_attempt_avg_time": 0.05442176870748299, + "io_file_handle_open_attempt_avg_time_details": { + "rate": 0 + }, + 
"connection_created": 2310, + "connection_created_details": { + "rate": 0 + }, + "connection_closed": 2268, + "connection_closed_details": { + "rate": 0 + }, + "channel_created": 2310, + "channel_created_details": { + "rate": 0 + }, + "channel_closed": 2267, + "channel_closed_details": { + "rate": 0 + }, + "queue_declared": 144281, + "queue_declared_details": { + "rate": 0 + }, + "queue_created": 663, + "queue_created_details": { + "rate": 0 + }, + "queue_deleted": 629, + "queue_deleted_details": { + "rate": 0 + }, + "cluster_links": [], + "metrics_gc_queue_length": { + "connection_closed": 0, + "channel_closed": 0, + "consumer_deleted": 0, + "exchange_deleted": 0, + "queue_deleted": 0, + "vhost_deleted": 0, + "node_node_deleted": 0, + "channel_consumer_deleted": 0 + } + } +] diff --git a/plugins/inputs/rabbitmq/testdata/set2/overview.json b/plugins/inputs/rabbitmq/testdata/set2/overview.json new file mode 100644 index 0000000000000..51977d61cbcae --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/overview.json @@ -0,0 +1 @@ +{"management_version":"3.8.14","rates_mode":"basic","sample_retention_policies":{"global":[600,3600,28800,86400],"basic":[600,3600],"detailed":[600]},"exchange_types":[{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"topic","description":"AMQP topic exchange, as per the AMQP specification","enabled":true}],"product_version":"3.8.14","product_name":"RabbitMQ","rabbitmq_version":"3.8.14","cluster_name":"rabbit@rmqserver","erlang_version":"23.2.7","erlang_full_version":"Erlang/OTP 23 [erts-11.1.8] [source] [64-bit] [smp:4:4] [ds:4:4:10] 
[async-threads:1]","disable_stats":false,"enable_queue_totals":false,"message_stats":{"ack":3736443,"ack_details":{"rate":0.0},"confirm":0,"confirm_details":{"rate":0.0},"deliver":3736446,"deliver_details":{"rate":0.0},"deliver_get":3736446,"deliver_get_details":{"rate":0.0},"deliver_no_ack":0,"deliver_no_ack_details":{"rate":0.0},"disk_reads":0,"disk_reads_details":{"rate":0.0},"disk_writes":55,"disk_writes_details":{"rate":0.0},"drop_unroutable":0,"drop_unroutable_details":{"rate":0.0},"get":0,"get_details":{"rate":0.0},"get_empty":0,"get_empty_details":{"rate":0.0},"get_no_ack":0,"get_no_ack_details":{"rate":0.0},"publish":770025,"publish_details":{"rate":0.0},"redeliver":1,"redeliver_details":{"rate":0.0},"return_unroutable":0,"return_unroutable_details":{"rate":0.0}},"churn_rates":{"channel_closed":2267,"channel_closed_details":{"rate":0.0},"channel_created":2310,"channel_created_details":{"rate":0.0},"connection_closed":2268,"connection_closed_details":{"rate":0.0},"connection_created":2310,"connection_created_details":{"rate":0.0},"queue_created":663,"queue_created_details":{"rate":0.0},"queue_declared":144281,"queue_declared_details":{"rate":0.0},"queue_deleted":629,"queue_deleted_details":{"rate":0.0}},"queue_totals":{"messages":30,"messages_details":{"rate":0.0},"messages_ready":30,"messages_ready_details":{"rate":0.0},"messages_unacknowledged":0,"messages_unacknowledged_details":{"rate":0.0}},"object_totals":{"channels":43,"connections":43,"consumers":37,"exchanges":8,"queues":34},"statistics_db_event_queue":0,"node":"rabbit@rmqserver","listeners":[{"node":"rabbit@rmqserver","protocol":"amqp","ip_address":"0.0.0.0","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@rmqserver","protocol":"amqp","ip_address":"::","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@rmqserver","protocol":"amqp/ssl","ip_address":"0.0.0.0","port":5671,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false,"versions":["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"],"cacertfile":"C:\\ProgramData\\Chain.pem","certfile":"C:\\ProgramData\\server.crt","keyfile":"C:\\ProgramData\\server.key","verify":"verify_peer","depth":3,"fail_if_no_peer_cert":false}},{"node":"rabbit@rmqserver","protocol":"amqp/ssl","ip_address":"::","port":5671,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false,"versions":["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"],"cacertfile":"C:\\ProgramData\\Chain.pem","certfile":"C:\\ProgramData\\server.crt","keyfile":"C:\\ProgramData\\server.key","verify":"verify_peer","depth":3,"fail_if_no_peer_cert":false}},{"node":"rabbit@rmqserver","protocol":"clustering","ip_address":"::","port":25672,"socket_opts":[]},{"node":"rabbit@rmqserver","protocol":"http","ip_address":"0.0.0.0","port":15672,"socket_opts":{"cowboy_opts":{"sendfile":false},"port":15672}},{"node":"rabbit@rmqserver","protocol":"http","ip_address":"::","port":15672,"socket_opts":{"cowboy_opts":{"sendfile":false},"port":15672}}],"contexts":[{"ssl_opts":[],"node":"rabbit@rmqserver","description":"RabbitMQ Management","path":"/","cowboy_opts":"[{sendfile,false}]","port":"15672"}]} diff --git a/plugins/inputs/rabbitmq/testdata/set2/queues.json b/plugins/inputs/rabbitmq/testdata/set2/queues.json new file mode 100644 index 0000000000000..6d8c2a831158a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/queues.json @@ -0,0 +1,356 @@ +[ + { + "arguments": { + "x-expires": 
300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 180, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16174 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:14", + "memory": 15840, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 180, + "ack_details": { + "rate": 0 + }, + "deliver": 180, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 180, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 180, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2caf-63e5-41e3-c15a-ba8fa11434b2", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11766294, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 177, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16205 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:14", + "memory": 15600, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 177, + "ack_details": { + "rate": 0 + }, + "deliver": 177, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 177, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 
0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 177, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2cb4-aa2d-c08b-457a-62d0893523a1", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11706656, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 175, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16183 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:15", + "memory": 15584, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 175, + "ack_details": { + "rate": 0 + }, + "deliver": 175, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 175, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 175, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2cb5-3820-e01b-6e20-ba29d5553fc3", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11649471, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + } +] diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index bcbf773689f33..904d5418ec8db 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -16,8 +16,8 @@ import ( ) type Raindrops struct { - Urls []string - http_client *http.Client + Urls []string + httpClient *http.Client } 
var sampleConfig = ` @@ -39,14 +39,14 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { for _, u := range r.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(r.gatherUrl(addr, acc)) + acc.AddError(r.gatherURL(addr, acc)) }(addr) } @@ -55,8 +55,8 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { return nil } -func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - resp, err := r.http_client.Get(addr.String()) +func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + resp, err := r.httpClient.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) } @@ -101,10 +101,10 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { acc.AddFields("raindrops", fields, tags) iterate := true - var queued_line_str string - var active_line_str string - var active_err error - var queued_err error + var queuedLineStr string + var activeLineStr string + var activeErr error + var queuedErr error for iterate { // Listen @@ -114,43 +114,42 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { "active": 0, "queued": 0, } - active_line_str, active_err = buf.ReadString('\n') - if active_err != nil { + activeLineStr, activeErr = buf.ReadString('\n') + if activeErr != nil { iterate = false break } - if strings.Compare(active_line_str, "\n") == 0 { + if strings.Compare(activeLineStr, "\n") == 0 { break } - queued_line_str, queued_err = buf.ReadString('\n') - if queued_err != nil { + queuedLineStr, queuedErr = buf.ReadString('\n') + if queuedErr != nil { iterate = false } - active_line := strings.Split(active_line_str, " ") - listen_name := active_line[0] + activeLine := strings.Split(activeLineStr, " ") + listenName := activeLine[0] - active, err := strconv.ParseUint(strings.TrimSpace(active_line[2]), 10, 64) + active, err := strconv.ParseUint(strings.TrimSpace(activeLine[2]), 10, 64) if err != nil { active = 0 } lis["active"] = active - queued_line := strings.Split(queued_line_str, " ") - queued, err := strconv.ParseUint(strings.TrimSpace(queued_line[2]), 10, 64) + queuedLine := strings.Split(queuedLineStr, " ") + queued, err := strconv.ParseUint(strings.TrimSpace(queuedLine[2]), 10, 64) if err != nil { queued = 0 } lis["queued"] = queued - if strings.Contains(listen_name, ":") { - listener := strings.Split(listen_name, ":") + if strings.Contains(listenName, ":") { + listener := strings.Split(listenName, ":") tags = map[string]string{ "ip": listener[0], "port": listener[1], } - } else { tags = map[string]string{ - "socket": listen_name, + "socket": listenName, } } acc.AddFields("raindrops_listen", lis, tags) @@ -177,11 +176,11 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string { func init() { inputs.Add("raindrops", func() telegraf.Input { - return &Raindrops{http_client: &http.Client{ + return &Raindrops{httpClient: &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, }, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, }} }) } diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index b0b601cec49cc..591dd624a10ea 100644 --- 
a/plugins/inputs/raindrops/raindrops_test.go
+++ b/plugins/inputs/raindrops/raindrops_test.go
@@ -49,20 +49,18 @@ func TestRaindropsGeneratesMetrics(t *testing.T) {
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		var rsp string
 
-		if r.URL.Path == "/_raindrops" {
-			rsp = sampleResponse
-		} else {
-			panic("Cannot handle request")
-		}
+		require.Equal(t, "/_raindrops", r.URL.Path, "Cannot handle request")
+		rsp = sampleResponse
 
-		fmt.Fprintln(w, rsp)
+		_, err := fmt.Fprintln(w, rsp)
+		require.NoError(t, err)
 	}))
 	defer ts.Close()
 
 	n := &Raindrops{
 		Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)},
-		http_client: &http.Client{Transport: &http.Transport{
-			ResponseHeaderTimeout: time.Duration(3 * time.Second),
+		httpClient: &http.Client{Transport: &http.Transport{
+			ResponseHeaderTimeout: 3 * time.Second,
 		}},
 	}
 
diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md
index 044118d1517ff..9c1cda75bff10 100644
--- a/plugins/inputs/ras/README.md
+++ b/plugins/inputs/ras/README.md
@@ -1,5 +1,7 @@
 # RAS Daemon Input Plugin
 
+This plugin is only available on Linux (only for the `386`, `amd64`, `arm`, and `arm64` architectures).
+
 The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon).
 
 ### Configuration
diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go
index 036402eb87438..a8d4ba727d7df 100644
--- a/plugins/inputs/ras/ras.go
+++ b/plugins/inputs/ras/ras.go
@@ -1,30 +1,39 @@
-// +build !windows
+//go:build linux && (386 || amd64 || arm || arm64)
+// +build linux
+// +build 386 amd64 arm arm64
 
 package ras
 
 import (
 	"database/sql"
+	"fmt"
+	"os"
 	"strconv"
 	"strings"
 	"time"
 
-	_ "github.com/mattn/go-sqlite3"
+	_ "modernc.org/sqlite" // to register the SQLite driver
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// Ras plugin gathers and counts errors provided by RASDaemon
 type Ras struct {
-	DbPath            string
-	latestTimestamp   time.Time
-	cpuSocketCounters map[int]metricCounters
-	serverCounters    metricCounters
+	DBPath string `toml:"db_path"`
+
+	Log telegraf.Logger `toml:"-"`
+	db  *sql.DB         `toml:"-"`
+
+	latestTimestamp   time.Time              `toml:"-"`
+	cpuSocketCounters map[int]metricCounters `toml:"-"`
+	serverCounters    metricCounters         `toml:"-"`
 }
 
 type machineCheckError struct {
-	Id           int
+	ID           int
 	Timestamp    string
-	SocketId     int
+	SocketID     int
 	ErrorMsg     string
 	MciStatusMsg string
 }
@@ -59,6 +68,7 @@ const (
 	unclassifiedMCEBase = "unclassified_mce_errors"
 )
 
+// SampleConfig returns sample configuration for this plugin.
func (r *Ras) SampleConfig() string {
 	return `
  ## Optional path to RASDaemon sqlite3 database.
@@ -67,18 +77,39 @@ func (r *Ras) SampleConfig() string {
 `
 }
 
+// Description returns the plugin description.
 func (r *Ras) Description() string {
 	return "RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required)."
}
 
-func (r *Ras) Gather(acc telegraf.Accumulator) error {
-	db, err := connectToDB(r.DbPath)
+// Start initializes the connection to the DB; metrics are gathered in Gather.
+func (r *Ras) Start(telegraf.Accumulator) error {
+	err := validateDbPath(r.DBPath)
+	if err != nil {
+		return err
+	}
+
+	r.db, err = connectToDB(r.DBPath)
 	if err != nil {
 		return err
 	}
-	defer db.Close()
 
-	rows, err := db.Query(mceQuery, r.latestTimestamp)
+	return nil
+}
+
+// Stop closes any existing DB connection.
+func (r *Ras) Stop() {
+	if r.db != nil {
+		err := r.db.Close()
+		if err != nil {
+			r.Log.Errorf("Error closing DB (%s): %v", r.DBPath, err)
+		}
+	}
+}
+
+// Gather reads the stats provided by RASDaemon and writes them to the Accumulator.
+func (r *Ras) Gather(acc telegraf.Accumulator) error {
+	rows, err := r.db.Query(mceQuery, r.latestTimestamp)
 	if err != nil {
 		return err
 	}
@@ -96,7 +127,7 @@ func (r *Ras) Gather(acc telegraf.Accumulator) error {
 		r.updateCounters(mcError)
 	}
 
-	addCpuSocketMetrics(acc, r.cpuSocketCounters)
+	addCPUSocketMetrics(acc, r.cpuSocketCounters)
 	addServerMetrics(acc, r.serverCounters)
 
 	return nil
@@ -119,7 +150,7 @@ func (r *Ras) updateCounters(mcError *machineCheckError) {
 		return
 	}
 
-	r.initializeCpuMetricDataIfRequired(mcError.SocketId)
+	r.initializeCPUMetricDataIfRequired(mcError.SocketID)
 	r.updateSocketCounters(mcError)
 	r.updateServerCounters(mcError)
 }
@@ -146,21 +177,38 @@ func newMetricCounters() *metricCounters {
 
 func (r *Ras) updateServerCounters(mcError *machineCheckError) {
 	if strings.Contains(mcError.ErrorMsg, "CACHE Level-2") &&
 		strings.Contains(mcError.ErrorMsg, "Error") {
-		r.serverCounters[levelTwoCache] += 1
+		r.serverCounters[levelTwoCache]++
 	}
 
 	if strings.Contains(mcError.ErrorMsg, "UPI:") {
-		r.serverCounters[upi] += 1
+		r.serverCounters[upi]++
 	}
 }
 
-func connectToDB(server string) (*sql.DB, error) {
-	return sql.Open("sqlite3", server)
+func validateDbPath(dbPath string) error {
+	pathInfo, err := os.Stat(dbPath)
+	if os.IsNotExist(err) {
+		return fmt.Errorf("provided db_path does not exist: [%s]", dbPath)
+	}
+
+	if err != nil {
+		return fmt.Errorf("cannot get system information for db_path file: [%s] - %v", dbPath, err)
+	}
+
+	if mode := pathInfo.Mode(); !mode.IsRegular() {
+		return fmt.Errorf("provided db_path does not point to a regular file: [%s]", dbPath)
+	}
+
+	return nil
+}
+
+func connectToDB(dbPath string) (*sql.DB, error) {
+	return sql.Open("sqlite", dbPath)
 }
 
-func (r *Ras) initializeCpuMetricDataIfRequired(socketId int) {
-	if _, ok := r.cpuSocketCounters[socketId]; !ok {
-		r.cpuSocketCounters[socketId] = *newMetricCounters()
+func (r *Ras) initializeCPUMetricDataIfRequired(socketID int) {
+	if _, ok := r.cpuSocketCounters[socketID]; !ok {
+		r.cpuSocketCounters[socketID] = *newMetricCounters()
 	}
 }
 
@@ -169,78 +217,78 @@ func (r *Ras) updateSocketCounters(mcError *machineCheckError) {
 	r.updateProcessorBaseCounters(mcError)
 
 	if strings.Contains(mcError.ErrorMsg, "Instruction TLB") &&
 		strings.Contains(mcError.ErrorMsg, "Error") {
-		r.cpuSocketCounters[mcError.SocketId][instructionTLB] += 1
+		r.cpuSocketCounters[mcError.SocketID][instructionTLB]++
 	}
 
 	if strings.Contains(mcError.ErrorMsg, "BUS") &&
 		strings.Contains(mcError.ErrorMsg, "Error") {
-		r.cpuSocketCounters[mcError.SocketId][processorBus] += 1
+		r.cpuSocketCounters[mcError.SocketID][processorBus]++
 	}
 
 	if (strings.Contains(mcError.ErrorMsg, "CACHE Level-0") ||
 		strings.Contains(mcError.ErrorMsg, "CACHE Level-1")) &&
 		strings.Contains(mcError.ErrorMsg, "Error") {
-
r.cpuSocketCounters[mcError.SocketId][instructionCache] += 1 + r.cpuSocketCounters[mcError.SocketID][instructionCache]++ } } func (r *Ras) updateProcessorBaseCounters(mcError *machineCheckError) { if strings.Contains(mcError.ErrorMsg, "Internal Timer error") { - r.cpuSocketCounters[mcError.SocketId][internalTimer] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][internalTimer]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } if strings.Contains(mcError.ErrorMsg, "SMM Handler Code Access Violation") { - r.cpuSocketCounters[mcError.SocketId][smmHandlerCode] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][smmHandlerCode]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } if strings.Contains(mcError.ErrorMsg, "Internal parity error") { - r.cpuSocketCounters[mcError.SocketId][internalParity] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][internalParity]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } if strings.Contains(mcError.ErrorMsg, "FRC error") { - r.cpuSocketCounters[mcError.SocketId][frc] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][frc]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } if strings.Contains(mcError.ErrorMsg, "External error") { - r.cpuSocketCounters[mcError.SocketId][externalMCEBase] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][externalMCEBase]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } if strings.Contains(mcError.ErrorMsg, "Microcode ROM parity error") { - r.cpuSocketCounters[mcError.SocketId][microcodeROMParity] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][microcodeROMParity]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } if strings.Contains(mcError.ErrorMsg, "Unclassified") || strings.Contains(mcError.ErrorMsg, "Internal unclassified") { - r.cpuSocketCounters[mcError.SocketId][unclassifiedMCEBase] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 + r.cpuSocketCounters[mcError.SocketID][unclassifiedMCEBase]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ } } func (r *Ras) updateMemoryCounters(mcError *machineCheckError) { if strings.Contains(mcError.ErrorMsg, "Memory read error") { if strings.Contains(mcError.MciStatusMsg, "Corrected_error") { - r.cpuSocketCounters[mcError.SocketId][memoryReadCorrected] += 1 + r.cpuSocketCounters[mcError.SocketID][memoryReadCorrected]++ } else { - r.cpuSocketCounters[mcError.SocketId][memoryReadUncorrected] += 1 + r.cpuSocketCounters[mcError.SocketID][memoryReadUncorrected]++ } } if strings.Contains(mcError.ErrorMsg, "Memory write error") { if strings.Contains(mcError.MciStatusMsg, "Corrected_error") { - r.cpuSocketCounters[mcError.SocketId][memoryWriteCorrected] += 1 + r.cpuSocketCounters[mcError.SocketID][memoryWriteCorrected]++ } else { - r.cpuSocketCounters[mcError.SocketId][memoryWriteUncorrected] += 1 + r.cpuSocketCounters[mcError.SocketID][memoryWriteUncorrected]++ } } } -func addCpuSocketMetrics(acc telegraf.Accumulator, cpuSocketCounters map[int]metricCounters) { - for socketId, data := range cpuSocketCounters { +func addCPUSocketMetrics(acc telegraf.Accumulator, cpuSocketCounters map[int]metricCounters) { + for socketID, data := range cpuSocketCounters { tags := 
map[string]string{ - "socket_id": strconv.Itoa(socketId), + "socket_id": strconv.Itoa(socketID), } fields := make(map[string]interface{}) @@ -263,7 +311,7 @@ func addServerMetrics(acc telegraf.Accumulator, counters map[string]int64) { func fetchMachineCheckError(rows *sql.Rows) (*machineCheckError, error) { mcError := &machineCheckError{} - err := rows.Scan(&mcError.Id, &mcError.Timestamp, &mcError.ErrorMsg, &mcError.MciStatusMsg, &mcError.SocketId) + err := rows.Scan(&mcError.ID, &mcError.Timestamp, &mcError.ErrorMsg, &mcError.MciStatusMsg, &mcError.SocketID) if err != nil { return nil, err @@ -280,7 +328,7 @@ func init() { inputs.Add("ras", func() telegraf.Input { defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700") return &Ras{ - DbPath: defaultDbPath, + DBPath: defaultDbPath, latestTimestamp: defaultTimestamp, cpuSocketCounters: map[int]metricCounters{ 0: *newMetricCounters(), diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go new file mode 100644 index 0000000000000..b0795fd794f6f --- /dev/null +++ b/plugins/inputs/ras/ras_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux || (linux && !386 && !amd64 && !arm && !arm64) +// +build !linux linux,!386,!amd64,!arm,!arm64 + +package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index 7b34074218b5c..656200fde95cc 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -1,4 +1,6 @@ -// +build !windows +//go:build linux && (386 || amd64 || arm || arm64) +// +build linux +// +build 386 amd64 arm arm64 package ras @@ -40,19 +42,19 @@ func TestUpdateLatestTimestamp(t *testing.T) { testData = append(testData, []machineCheckError{ { Timestamp: "2019-05-20 08:25:55 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "", MciStatusMsg: "", }, { Timestamp: "2018-02-21 12:27:22 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "", MciStatusMsg: "", }, { Timestamp: ts, - SocketId: 0, + SocketID: 0, ErrorMsg: "", MciStatusMsg: "", }, @@ -71,25 +73,25 @@ func TestMultipleSockets(t *testing.T) { testData = []machineCheckError{ { Timestamp: "2019-05-20 08:25:55 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: cacheL2, MciStatusMsg: overflow, }, { Timestamp: "2018-02-21 12:27:22 +0200", - SocketId: 1, + SocketID: 1, ErrorMsg: cacheL2, MciStatusMsg: overflow, }, { Timestamp: "2020-03-21 14:17:28 +0200", - SocketId: 2, + SocketID: 2, ErrorMsg: cacheL2, MciStatusMsg: overflow, }, { Timestamp: "2020-03-21 17:24:18 +0200", - SocketId: 3, + SocketID: 3, ErrorMsg: cacheL2, MciStatusMsg: overflow, }, @@ -113,8 +115,8 @@ func TestMultipleSockets(t *testing.T) { func TestMissingDatabase(t *testing.T) { var acc testutil.Accumulator ras := newRas() - ras.DbPath = "/tmp/test.db" - err := ras.Gather(&acc) + ras.DBPath = "/nonexistent/ras.db" + err := ras.Start(&acc) assert.Error(t, err) } @@ -136,7 +138,7 @@ func TestEmptyDatabase(t *testing.T) { func newRas() *Ras { defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700") return &Ras{ - DbPath: defaultDbPath, + DBPath: defaultDbPath, latestTimestamp: defaultTimestamp, cpuSocketCounters: map[int]metricCounters{ 0: *newMetricCounters(), @@ -151,103 +153,103 @@ func newRas() *Ras { var testData = []machineCheckError{ { Timestamp: "2020-05-20 07:34:53 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL0_ERR Transaction: Memory read error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 07:35:11 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "MEMORY CONTROLLER 
RD_CHANNEL0_ERR Transaction: Memory read error", MciStatusMsg: "Uncorrected_error", }, { Timestamp: "2020-05-20 07:37:50 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL2_ERR Transaction: Memory write error", MciStatusMsg: "Uncorrected_error", }, { Timestamp: "2020-05-20 08:14:51 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "MEMORY CONTROLLER WR_CHANNEL2_ERR Transaction: Memory write error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:15:31 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "corrected filtering (some unreported errors in same region) Instruction CACHE Level-0 Read Error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:16:32 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "Instruction TLB Level-0 Error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:16:56 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "No Error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:17:24 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "Unclassified", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:17:41 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "Microcode ROM parity error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:17:48 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "FRC error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:18:18 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "Internal parity error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:18:34 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "SMM Handler Code Access Violation", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:18:54 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "Internal Timer error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:21:23 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "BUS Level-3 Generic Generic IO Request-did-not-timeout Error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:23:23 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "External error", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:25:31 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "UPI: COR LL Rx detected CRC error - successful LLR without Phy Reinit", MciStatusMsg: "Error_overflow Corrected_error", }, { Timestamp: "2020-05-20 08:25:55 +0200", - SocketId: 0, + SocketID: 0, ErrorMsg: "Instruction CACHE Level-2 Generic Error", MciStatusMsg: "Error_overflow Corrected_error", }, diff --git a/plugins/inputs/ras/ras_windows.go b/plugins/inputs/ras/ras_windows.go deleted file mode 100644 index ac7dadd567381..0000000000000 --- a/plugins/inputs/ras/ras_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build windows - -package ras diff --git a/plugins/inputs/ravendb/README.md b/plugins/inputs/ravendb/README.md new file mode 100644 index 0000000000000..b40850ab5c82d --- /dev/null +++ b/plugins/inputs/ravendb/README.md @@ -0,0 +1,216 @@ +# RavenDB Input Plugin + +Reads metrics from RavenDB servers via monitoring endpoints APIs. + +Requires RavenDB Server 5.2+. + +### Configuration + +The following is an example config for RavenDB. **Note:** The client certificate used should have `Operator` permissions on the cluster. 
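As background for the `tls_cert` and `tls_key` options used below: presenting an X509 client certificate from Go comes down to loading the key pair into the HTTP transport. A rough, self-contained sketch (the `newClient` helper and the 5s timeout are illustrative assumptions; the plugin itself wires this up through Telegraf's shared `tls.ClientConfig`):

```go
package example

import (
	"crypto/tls"
	"net/http"
	"time"
)

// newClient loads the certificate pair referenced by tls_cert/tls_key
// and returns an HTTP client that presents it to the RavenDB server.
func newClient(certFile, keyFile string) (*http.Client, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Timeout: 5 * time.Second, // mirrors the plugin's default timeout
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		},
	}, nil
}
```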
+ +```toml +[[inputs.ravendb]] + ## Node URL and port that RavenDB is listening on + url = "https://localhost:8080" + + ## RavenDB X509 client certificate setup + tls_cert = "/etc/telegraf/raven.crt" + tls_key = "/etc/telegraf/raven.key" + + ## Optional request timeout + ## + ## Timeout, specifies the amount of time to wait + ## for a server's response headers after fully writing the request and + ## time limit for requests made by this client + # timeout = "5s" + + ## List of statistics which are collected + # At least one is required + # Allowed values: server, databases, indexes, collections + # + # stats_include = ["server", "databases", "indexes", "collections"] + + ## List of db where database stats are collected + ## If empty, all db are concerned + # db_stats_dbs = [] + + ## List of db where index status are collected + ## If empty, all indexes from all db are concerned + # index_stats_dbs = [] + + ## List of db where collection status are collected + ## If empty, all collections from all db are concerned + # collection_stats_dbs = [] +``` + +### Metrics + +- ravendb_server + - tags: + - url + - node_tag + - cluster_id + - public_server_url (optional) + - fields: + - backup_current_number_of_running_backups + - backup_max_number_of_concurrent_backups + - certificate_server_certificate_expiration_left_in_sec (optional) + - certificate_well_known_admin_certificates (optional, separated by ';') + - cluster_current_term + - cluster_index + - cluster_node_state + - 0 -> Passive + - 1 -> Candidate + - 2 -> Follower + - 3 -> LeaderElect + - 4 -> Leader + - config_public_tcp_server_urls (optional, separated by ';') + - config_server_urls + - config_tcp_server_urls (optional, separated by ';') + - cpu_assigned_processor_count + - cpu_machine_usage + - cpu_machine_io_wait (optional) + - cpu_process_usage + - cpu_processor_count + - cpu_thread_pool_available_worker_threads + - cpu_thread_pool_available_completion_port_threads + - databases_loaded_count + - databases_total_count + - disk_remaining_storage_space_percentage + - disk_system_store_used_data_file_size_in_mb + - disk_system_store_total_data_file_size_in_mb + - disk_total_free_space_in_mb + - license_expiration_left_in_sec (optional) + - license_max_cores + - license_type + - license_utilized_cpu_cores + - memory_allocated_in_mb + - memory_installed_in_mb + - memory_low_memory_severity + - 0 -> None + - 1 -> Low + - 2 -> Extremely Low + - memory_physical_in_mb + - memory_total_dirty_in_mb + - memory_total_swap_size_in_mb + - memory_total_swap_usage_in_mb + - memory_working_set_swap_usage_in_mb + - network_concurrent_requests_count + - network_last_authorized_non_cluster_admin_request_time_in_sec (optional) + - network_last_request_time_in_sec (optional) + - network_requests_per_sec + - network_tcp_active_connections + - network_total_requests + - server_full_version + - server_process_id + - server_version + - uptime_in_sec + +- ravendb_databases + - tags: + - url + - database_name + - database_id + - node_tag + - public_server_url (optional) + - fields: + - counts_alerts + - counts_attachments + - counts_documents + - counts_performance_hints + - counts_rehabs + - counts_replication_factor + - counts_revisions + - counts_unique_attachments + - statistics_doc_puts_per_sec + - statistics_map_index_indexes_per_sec + - statistics_map_reduce_index_mapped_per_sec + - statistics_map_reduce_index_reduced_per_sec + - statistics_request_average_duration_in_ms + - statistics_requests_count + - statistics_requests_per_sec + - indexes_auto_count 
+ - indexes_count + - indexes_disabled_count + - indexes_errors_count + - indexes_errored_count + - indexes_idle_count + - indexes_stale_count + - indexes_static_count + - storage_documents_allocated_data_file_in_mb + - storage_documents_used_data_file_in_mb + - storage_indexes_allocated_data_file_in_mb + - storage_indexes_used_data_file_in_mb + - storage_total_allocated_storage_file_in_mb + - storage_total_free_space_in_mb + - time_since_last_backup_in_sec (optional) + - uptime_in_sec + +- ravendb_indexes + - tags: + - database_name + - index_name + - node_tag + - public_server_url (optional) + - url + - fields + - errors + - is_invalid + - lock_mode + - Unlock + - LockedIgnore + - LockedError + - mapped_per_sec + - priority + - Low + - Normal + - High + - reduced_per_sec + - state + - Normal + - Disabled + - Idle + - Error + - status + - Running + - Paused + - Disabled + - time_since_last_indexing_in_sec (optional) + - time_since_last_query_in_sec (optional) + - type + - None + - AutoMap + - AutoMapReduce + - Map + - MapReduce + - Faulty + - JavaScriptMap + - JavaScriptMapReduce + +- ravendb_collections + - tags: + - collection_name + - database_name + - node_tag + - public_server_url (optional) + - url + - fields + - documents_count + - documents_size_in_bytes + - revisions_size_in_bytes + - tombstones_size_in_bytes + - total_size_in_bytes + +### Example output + +``` +> ravendb_server,cluster_id=07aecc42-9194-4181-999c-1c42450692c9,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 backup_current_number_of_running_backups=0i,backup_max_number_of_concurrent_backups=4i,certificate_server_certificate_expiration_left_in_sec=-1,cluster_current_term=2i,cluster_index=10i,cluster_node_state=4i,config_server_urls="http://127.0.0.1:8080",cpu_assigned_processor_count=8i,cpu_machine_usage=19.09944089456869,cpu_process_usage=0.16977205323024872,cpu_processor_count=8i,cpu_thread_pool_available_completion_port_threads=1000i,cpu_thread_pool_available_worker_threads=32763i,databases_loaded_count=1i,databases_total_count=1i,disk_remaining_storage_space_percentage=18i,disk_system_store_total_data_file_size_in_mb=35184372088832i,disk_system_store_used_data_file_size_in_mb=31379031064576i,disk_total_free_space_in_mb=42931i,license_expiration_left_in_sec=24079222.8772186,license_max_cores=256i,license_type="Enterprise",license_utilized_cpu_cores=8i,memory_allocated_in_mb=205i,memory_installed_in_mb=16384i,memory_low_memory_severity=0i,memory_physical_in_mb=16250i,memory_total_dirty_in_mb=0i,memory_total_swap_size_in_mb=0i,memory_total_swap_usage_in_mb=0i,memory_working_set_swap_usage_in_mb=0i,network_concurrent_requests_count=1i,network_last_request_time_in_sec=0.0058717,network_requests_per_sec=0.09916543455308825,network_tcp_active_connections=128i,network_total_requests=10i,server_full_version="5.2.0-custom-52",server_process_id=31044i,server_version="5.2",uptime_in_sec=56i 1613027977000000000 +> ravendb_databases,database_id=ced0edba-8f80-48b8-8e81-c3d2c6748ec3,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 
counts_alerts=0i,counts_attachments=17i,counts_documents=1059i,counts_performance_hints=0i,counts_rehabs=0i,counts_replication_factor=1i,counts_revisions=5475i,counts_unique_attachments=17i,indexes_auto_count=0i,indexes_count=7i,indexes_disabled_count=0i,indexes_errored_count=0i,indexes_errors_count=0i,indexes_idle_count=0i,indexes_stale_count=0i,indexes_static_count=7i,statistics_doc_puts_per_sec=0,statistics_map_index_indexes_per_sec=0,statistics_map_reduce_index_mapped_per_sec=0,statistics_map_reduce_index_reduced_per_sec=0,statistics_request_average_duration_in_ms=0,statistics_requests_count=0i,statistics_requests_per_sec=0,storage_documents_allocated_data_file_in_mb=140737488355328i,storage_documents_used_data_file_in_mb=74741020884992i,storage_indexes_allocated_data_file_in_mb=175921860444160i,storage_indexes_used_data_file_in_mb=120722940755968i,storage_total_allocated_storage_file_in_mb=325455441821696i,storage_total_free_space_in_mb=42931i,uptime_in_sec=54 1613027977000000000 +> ravendb_indexes,database_name=db1,host=DESKTOP-2OISR6D,index_name=Orders/Totals,node_tag=A,url=http://localhost:8080 errors=0i,is_invalid=false,lock_mode="Unlock",mapped_per_sec=0,priority="Normal",reduced_per_sec=0,state="Normal",status="Running",time_since_last_indexing_in_sec=45.4256655,time_since_last_query_in_sec=45.4304202,type="Map" 1613027977000000000 +> ravendb_collections,collection_name=@hilo,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 documents_count=8i,documents_size_in_bytes=122880i,revisions_size_in_bytes=0i,tombstones_size_in_bytes=122880i,total_size_in_bytes=245760i 1613027977000000000 +``` + +### Contributors + +- Marcin Lewandowski (https://github.com/ml054/) +- Casey Barton (https://github.com/bartoncasey) \ No newline at end of file diff --git a/plugins/inputs/ravendb/ravendb.go b/plugins/inputs/ravendb/ravendb.go new file mode 100644 index 0000000000000..efc1b9517cc24 --- /dev/null +++ b/plugins/inputs/ravendb/ravendb.go @@ -0,0 +1,425 @@ +package ravendb + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// defaultURL will set a default value that corresponds to the default value +// used by RavenDB +const defaultURL = "http://localhost:8080" + +const defaultTimeout = 5 + +// RavenDB defines the configuration necessary for gathering metrics, +// see the sample config for further details +type RavenDB struct { + URL string `toml:"url"` + Name string `toml:"name"` + + Timeout config.Duration `toml:"timeout"` + + StatsInclude []string `toml:"stats_include"` + DbStatsDbs []string `toml:"db_stats_dbs"` + IndexStatsDbs []string `toml:"index_stats_dbs"` + CollectionStatsDbs []string `toml:"collection_stats_dbs"` + + tls.ClientConfig + + Log telegraf.Logger `toml:"-"` + + client *http.Client + requestURLServer string + requestURLDatabases string + requestURLIndexes string + requestURLCollection string +} + +var sampleConfig = ` + ## Node URL and port that RavenDB is listening on + url = "https://localhost:8080" + + ## RavenDB X509 client certificate setup + # tls_cert = "/etc/telegraf/raven.crt" + # tls_key = "/etc/telegraf/raven.key" + + ## Optional request timeout + ## + ## Timeout, specifies the amount of time to wait + ## for a server's response headers after fully writing the 
request and + ## time limit for requests made by this client + # timeout = "5s" + + ## List of statistics which are collected + # At least one is required + # Allowed values: server, databases, indexes, collections + # + # stats_include = ["server", "databases", "indexes", "collections"] + + ## List of db where database stats are collected + ## If empty, all db are concerned + # db_stats_dbs = [] + + ## List of db where index status are collected + ## If empty, all indexes from all db are concerned + # index_stats_dbs = [] + + ## List of db where collection status are collected + ## If empty, all collections from all db are concerned + # collection_stats_dbs = [] +` + +func (r *RavenDB) SampleConfig() string { + return sampleConfig +} + +func (r *RavenDB) Description() string { + return "Reads metrics from RavenDB servers via the Monitoring Endpoints" +} + +func (r *RavenDB) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + for _, statToCollect := range r.StatsInclude { + wg.Add(1) + + switch statToCollect { + case "server": + go func() { + defer wg.Done() + r.gatherServer(acc) + }() + case "databases": + go func() { + defer wg.Done() + r.gatherDatabases(acc) + }() + case "indexes": + go func() { + defer wg.Done() + r.gatherIndexes(acc) + }() + case "collections": + go func() { + defer wg.Done() + r.gatherCollections(acc) + }() + } + } + + wg.Wait() + + return nil +} + +func (r *RavenDB) ensureClient() error { + if r.client != nil { + return nil + } + + tlsCfg, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(r.Timeout), + TLSClientConfig: tlsCfg, + } + r.client = &http.Client{ + Transport: tr, + Timeout: time.Duration(r.Timeout), + } + + return nil +} + +func (r *RavenDB) requestJSON(u string, target interface{}) error { + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return err + } + + resp, err := r.client.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + r.Log.Debugf("%s: %s", u, resp.Status) + if resp.StatusCode >= 400 { + return fmt.Errorf("invalid response code to request '%s': %d - %s", r.URL, resp.StatusCode, resp.Status) + } + + return json.NewDecoder(resp.Body).Decode(target) +} + +func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { + serverResponse := &serverMetricsResponse{} + + err := r.requestJSON(r.requestURLServer, &serverResponse) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "cluster_id": serverResponse.Cluster.ID, + "node_tag": serverResponse.Cluster.NodeTag, + "url": r.URL, + } + + if serverResponse.Config.PublicServerURL != nil { + tags["public_server_url"] = *serverResponse.Config.PublicServerURL + } + + fields := map[string]interface{}{ + "backup_current_number_of_running_backups": serverResponse.Backup.CurrentNumberOfRunningBackups, + "backup_max_number_of_concurrent_backups": serverResponse.Backup.MaxNumberOfConcurrentBackups, + "certificate_server_certificate_expiration_left_in_sec": serverResponse.Certificate.ServerCertificateExpirationLeftInSec, + "cluster_current_term": serverResponse.Cluster.CurrentTerm, + "cluster_index": serverResponse.Cluster.Index, + "cluster_node_state": serverResponse.Cluster.NodeState, + "config_server_urls": strings.Join(serverResponse.Config.ServerUrls, ";"), + "cpu_assigned_processor_count": serverResponse.CPU.AssignedProcessorCount, + "cpu_machine_io_wait": serverResponse.CPU.MachineIoWait, + "cpu_machine_usage": serverResponse.CPU.MachineUsage, 
+ "cpu_process_usage": serverResponse.CPU.ProcessUsage, + "cpu_processor_count": serverResponse.CPU.ProcessorCount, + "cpu_thread_pool_available_worker_threads": serverResponse.CPU.ThreadPoolAvailableWorkerThreads, + "cpu_thread_pool_available_completion_port_threads": serverResponse.CPU.ThreadPoolAvailableCompletionPortThreads, + "databases_loaded_count": serverResponse.Databases.LoadedCount, + "databases_total_count": serverResponse.Databases.TotalCount, + "disk_remaining_storage_space_percentage": serverResponse.Disk.RemainingStorageSpacePercentage, + "disk_system_store_used_data_file_size_in_mb": serverResponse.Disk.SystemStoreUsedDataFileSizeInMb, + "disk_system_store_total_data_file_size_in_mb": serverResponse.Disk.SystemStoreTotalDataFileSizeInMb, + "disk_total_free_space_in_mb": serverResponse.Disk.TotalFreeSpaceInMb, + "license_expiration_left_in_sec": serverResponse.License.ExpirationLeftInSec, + "license_max_cores": serverResponse.License.MaxCores, + "license_type": serverResponse.License.Type, + "license_utilized_cpu_cores": serverResponse.License.UtilizedCPUCores, + "memory_allocated_in_mb": serverResponse.Memory.AllocatedMemoryInMb, + "memory_installed_in_mb": serverResponse.Memory.InstalledMemoryInMb, + "memory_low_memory_severity": serverResponse.Memory.LowMemorySeverity, + "memory_physical_in_mb": serverResponse.Memory.PhysicalMemoryInMb, + "memory_total_dirty_in_mb": serverResponse.Memory.TotalDirtyInMb, + "memory_total_swap_size_in_mb": serverResponse.Memory.TotalSwapSizeInMb, + "memory_total_swap_usage_in_mb": serverResponse.Memory.TotalSwapUsageInMb, + "memory_working_set_swap_usage_in_mb": serverResponse.Memory.WorkingSetSwapUsageInMb, + "network_concurrent_requests_count": serverResponse.Network.ConcurrentRequestsCount, + "network_last_authorized_non_cluster_admin_request_time_in_sec": serverResponse.Network.LastAuthorizedNonClusterAdminRequestTimeInSec, + "network_last_request_time_in_sec": serverResponse.Network.LastRequestTimeInSec, + "network_requests_per_sec": serverResponse.Network.RequestsPerSec, + "network_tcp_active_connections": serverResponse.Network.TCPActiveConnections, + "network_total_requests": serverResponse.Network.TotalRequests, + "server_full_version": serverResponse.ServerFullVersion, + "server_process_id": serverResponse.ServerProcessID, + "server_version": serverResponse.ServerVersion, + "uptime_in_sec": serverResponse.UpTimeInSec, + } + + if serverResponse.Config.TCPServerURLs != nil { + fields["config_tcp_server_urls"] = strings.Join(serverResponse.Config.TCPServerURLs, ";") + } + + if serverResponse.Config.PublicTCPServerURLs != nil { + fields["config_public_tcp_server_urls"] = strings.Join(serverResponse.Config.PublicTCPServerURLs, ";") + } + + if serverResponse.Certificate.WellKnownAdminCertificates != nil { + fields["certificate_well_known_admin_certificates"] = strings.Join(serverResponse.Certificate.WellKnownAdminCertificates, ";") + } + + acc.AddFields("ravendb_server", fields, tags) +} + +func (r *RavenDB) gatherDatabases(acc telegraf.Accumulator) { + databasesResponse := &databasesMetricResponse{} + + err := r.requestJSON(r.requestURLDatabases, &databasesResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, dbResponse := range databasesResponse.Results { + tags := map[string]string{ + "database_id": dbResponse.DatabaseID, + "database_name": dbResponse.DatabaseName, + "node_tag": databasesResponse.NodeTag, + "url": r.URL, + } + + if databasesResponse.PublicServerURL != nil { + tags["public_server_url"] = 
*databasesResponse.PublicServerURL + } + + fields := map[string]interface{}{ + "counts_alerts": dbResponse.Counts.Alerts, + "counts_attachments": dbResponse.Counts.Attachments, + "counts_documents": dbResponse.Counts.Documents, + "counts_performance_hints": dbResponse.Counts.PerformanceHints, + "counts_rehabs": dbResponse.Counts.Rehabs, + "counts_replication_factor": dbResponse.Counts.ReplicationFactor, + "counts_revisions": dbResponse.Counts.Revisions, + "counts_unique_attachments": dbResponse.Counts.UniqueAttachments, + "indexes_auto_count": dbResponse.Indexes.AutoCount, + "indexes_count": dbResponse.Indexes.Count, + "indexes_errored_count": dbResponse.Indexes.ErroredCount, + "indexes_errors_count": dbResponse.Indexes.ErrorsCount, + "indexes_disabled_count": dbResponse.Indexes.DisabledCount, + "indexes_idle_count": dbResponse.Indexes.IdleCount, + "indexes_stale_count": dbResponse.Indexes.StaleCount, + "indexes_static_count": dbResponse.Indexes.StaticCount, + "statistics_doc_puts_per_sec": dbResponse.Statistics.DocPutsPerSec, + "statistics_map_index_indexes_per_sec": dbResponse.Statistics.MapIndexIndexesPerSec, + "statistics_map_reduce_index_mapped_per_sec": dbResponse.Statistics.MapReduceIndexMappedPerSec, + "statistics_map_reduce_index_reduced_per_sec": dbResponse.Statistics.MapReduceIndexReducedPerSec, + "statistics_request_average_duration_in_ms": dbResponse.Statistics.RequestAverageDurationInMs, + "statistics_requests_count": dbResponse.Statistics.RequestsCount, + "statistics_requests_per_sec": dbResponse.Statistics.RequestsPerSec, + "storage_documents_allocated_data_file_in_mb": dbResponse.Storage.DocumentsAllocatedDataFileInMb, + "storage_documents_used_data_file_in_mb": dbResponse.Storage.DocumentsUsedDataFileInMb, + "storage_indexes_allocated_data_file_in_mb": dbResponse.Storage.IndexesAllocatedDataFileInMb, + "storage_indexes_used_data_file_in_mb": dbResponse.Storage.IndexesUsedDataFileInMb, + "storage_total_allocated_storage_file_in_mb": dbResponse.Storage.TotalAllocatedStorageFileInMb, + "storage_total_free_space_in_mb": dbResponse.Storage.TotalFreeSpaceInMb, + "time_since_last_backup_in_sec": dbResponse.TimeSinceLastBackupInSec, + "uptime_in_sec": dbResponse.UptimeInSec, + } + + acc.AddFields("ravendb_databases", fields, tags) + } +} + +func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) { + indexesResponse := &indexesMetricResponse{} + + err := r.requestJSON(r.requestURLIndexes, &indexesResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, perDbIndexResponse := range indexesResponse.Results { + for _, indexResponse := range perDbIndexResponse.Indexes { + tags := map[string]string{ + "database_name": perDbIndexResponse.DatabaseName, + "index_name": indexResponse.IndexName, + "node_tag": indexesResponse.NodeTag, + "url": r.URL, + } + + if indexesResponse.PublicServerURL != nil { + tags["public_server_url"] = *indexesResponse.PublicServerURL + } + + fields := map[string]interface{}{ + "errors": indexResponse.Errors, + "is_invalid": indexResponse.IsInvalid, + "lock_mode": indexResponse.LockMode, + "mapped_per_sec": indexResponse.MappedPerSec, + "priority": indexResponse.Priority, + "reduced_per_sec": indexResponse.ReducedPerSec, + "state": indexResponse.State, + "status": indexResponse.Status, + "time_since_last_indexing_in_sec": indexResponse.TimeSinceLastIndexingInSec, + "time_since_last_query_in_sec": indexResponse.TimeSinceLastQueryInSec, + "type": indexResponse.Type, + } + + acc.AddFields("ravendb_indexes", fields, tags) + } + } +} + +func (r 
*RavenDB) gatherCollections(acc telegraf.Accumulator) {
+	collectionsResponse := &collectionsMetricResponse{}
+
+	err := r.requestJSON(r.requestURLCollection, &collectionsResponse)
+	if err != nil {
+		acc.AddError(err)
+		return
+	}
+
+	for _, perDbCollectionMetrics := range collectionsResponse.Results {
+		for _, collectionMetrics := range perDbCollectionMetrics.Collections {
+			tags := map[string]string{
+				"collection_name": collectionMetrics.CollectionName,
+				"database_name":   perDbCollectionMetrics.DatabaseName,
+				"node_tag":        collectionsResponse.NodeTag,
+				"url":             r.URL,
+			}
+
+			if collectionsResponse.PublicServerURL != nil {
+				tags["public_server_url"] = *collectionsResponse.PublicServerURL
+			}
+
+			fields := map[string]interface{}{
+				"documents_count":          collectionMetrics.DocumentsCount,
+				"documents_size_in_bytes":  collectionMetrics.DocumentsSizeInBytes,
+				"revisions_size_in_bytes":  collectionMetrics.RevisionsSizeInBytes,
+				"tombstones_size_in_bytes": collectionMetrics.TombstonesSizeInBytes,
+				"total_size_in_bytes":      collectionMetrics.TotalSizeInBytes,
+			}
+
+			acc.AddFields("ravendb_collections", fields, tags)
+		}
+	}
+}
+
+func prepareDBNamesURLPart(dbNames []string) string {
+	if len(dbNames) == 0 {
+		return ""
+	}
+	result := "?name=" + url.QueryEscape(dbNames[0])
+	for _, db := range dbNames[1:] {
+		result += "&name=" + url.QueryEscape(db)
+	}
+
+	return result
+}
+
+func (r *RavenDB) Init() error {
+	if r.URL == "" {
+		r.URL = defaultURL
+	}
+
+	r.requestURLServer = r.URL + "/admin/monitoring/v1/server"
+	r.requestURLDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDBNamesURLPart(r.DbStatsDbs)
+	r.requestURLIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDBNamesURLPart(r.IndexStatsDbs)
+	r.requestURLCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDBNamesURLPart(r.CollectionStatsDbs)
+
+	err := choice.CheckSlice(r.StatsInclude, []string{"server", "databases", "indexes", "collections"})
+	if err != nil {
+		return err
+	}
+
+	err = r.ensureClient()
+	if err != nil {
+		r.Log.Errorf("Failed to create HTTP client: %v", err)
+		return err
+	}
+
+	return nil
+}
+
+func init() {
+	inputs.Add("ravendb", func() telegraf.Input {
+		return &RavenDB{
+			Timeout:      config.Duration(defaultTimeout * time.Second),
+			StatsInclude: []string{"server", "databases", "indexes", "collections"},
+		}
+	})
+}
diff --git a/plugins/inputs/ravendb/ravendb_dto.go b/plugins/inputs/ravendb/ravendb_dto.go
new file mode 100644
index 0000000000000..87ae34dccc541
--- /dev/null
+++ b/plugins/inputs/ravendb/ravendb_dto.go
@@ -0,0 +1,199 @@
+package ravendb
+
+type serverMetricsResponse struct {
+	ServerVersion     string               `json:"ServerVersion"`
+	ServerFullVersion string               `json:"ServerFullVersion"`
+	UpTimeInSec       int32                `json:"UpTimeInSec"`
+	ServerProcessID   int32                `json:"ServerProcessId"`
+	Backup            backupMetrics        `json:"Backup"`
+	Config            configurationMetrics `json:"Config"`
+	CPU               cpuMetrics           `json:"Cpu"`
+	Memory            memoryMetrics        `json:"Memory"`
+	Disk              diskMetrics          `json:"Disk"`
+	License           licenseMetrics       `json:"License"`
+	Network           networkMetrics       `json:"Network"`
+	Certificate       certificateMetrics   `json:"Certificate"`
+	Cluster           clusterMetrics       `json:"Cluster"`
+	Databases         allDatabasesMetrics  `json:"Databases"`
+}
+
+type backupMetrics struct {
+	CurrentNumberOfRunningBackups int32 `json:"CurrentNumberOfRunningBackups"`
+	MaxNumberOfConcurrentBackups  int32 `json:"MaxNumberOfConcurrentBackups"`
+}
+
+type configurationMetrics struct {
+	ServerUrls          []string `json:"ServerUrls"`
+	PublicServerURL     *string  `json:"PublicServerUrl"`
+	TCPServerURLs       []string
`json:"TcpServerUrls"` + PublicTCPServerURLs []string `json:"PublicTcpServerUrls"` +} + +type cpuMetrics struct { + ProcessUsage float64 `json:"ProcessUsage"` + MachineUsage float64 `json:"MachineUsage"` + MachineIoWait *float64 `json:"MachineIoWait"` + ProcessorCount int32 `json:"ProcessorCount"` + AssignedProcessorCount int32 `json:"AssignedProcessorCount"` + ThreadPoolAvailableWorkerThreads int32 `json:"ThreadPoolAvailableWorkerThreads"` + ThreadPoolAvailableCompletionPortThreads int32 `json:"ThreadPoolAvailableCompletionPortThreads"` +} + +type memoryMetrics struct { + AllocatedMemoryInMb int64 `json:"AllocatedMemoryInMb"` + PhysicalMemoryInMb int64 `json:"PhysicalMemoryInMb"` + InstalledMemoryInMb int64 `json:"InstalledMemoryInMb"` + LowMemorySeverity string `json:"LowMemorySeverity"` + TotalSwapSizeInMb int64 `json:"TotalSwapSizeInMb"` + TotalSwapUsageInMb int64 `json:"TotalSwapUsageInMb"` + WorkingSetSwapUsageInMb int64 `json:"WorkingSetSwapUsageInMb"` + TotalDirtyInMb int64 `json:"TotalDirtyInMb"` +} + +type diskMetrics struct { + SystemStoreUsedDataFileSizeInMb int64 `json:"SystemStoreUsedDataFileSizeInMb"` + SystemStoreTotalDataFileSizeInMb int64 `json:"SystemStoreTotalDataFileSizeInMb"` + TotalFreeSpaceInMb int64 `json:"TotalFreeSpaceInMb"` + RemainingStorageSpacePercentage int64 `json:"RemainingStorageSpacePercentage"` +} + +type licenseMetrics struct { + Type string `json:"Type"` + ExpirationLeftInSec *float64 `json:"ExpirationLeftInSec"` + UtilizedCPUCores int32 `json:"UtilizedCpuCores"` + MaxCores int32 `json:"MaxCores"` +} + +type networkMetrics struct { + TCPActiveConnections int64 `json:"TcpActiveConnections"` + ConcurrentRequestsCount int64 `json:"ConcurrentRequestsCount"` + TotalRequests int64 `json:"TotalRequests"` + RequestsPerSec float64 `json:"RequestsPerSec"` + LastRequestTimeInSec *float64 `json:"LastRequestTimeInSec"` + LastAuthorizedNonClusterAdminRequestTimeInSec *float64 `json:"LastAuthorizedNonClusterAdminRequestTimeInSec"` +} + +type certificateMetrics struct { + ServerCertificateExpirationLeftInSec *float64 `json:"ServerCertificateExpirationLeftInSec"` + WellKnownAdminCertificates []string `json:"WellKnownAdminCertificates"` +} + +type clusterMetrics struct { + NodeTag string `json:"NodeTag"` + NodeState string `json:"NodeState"` + CurrentTerm int64 `json:"CurrentTerm"` + Index int64 `json:"Index"` + ID string `json:"Id"` +} + +type allDatabasesMetrics struct { + TotalCount int32 `json:"TotalCount"` + LoadedCount int32 `json:"LoadedCount"` +} + +type databasesMetricResponse struct { + Results []*databaseMetrics `json:"Results"` + PublicServerURL *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type databaseMetrics struct { + DatabaseName string `json:"DatabaseName"` + DatabaseID string `json:"DatabaseId"` + UptimeInSec float64 `json:"UptimeInSec"` + TimeSinceLastBackupInSec *float64 `json:"TimeSinceLastBackupInSec"` + + Counts databaseCounts `json:"Counts"` + Statistics databaseStatistics `json:"Statistics"` + + Indexes databaseIndexesMetrics `json:"Indexes"` + Storage databaseStorageMetrics `json:"Storage"` +} + +type databaseCounts struct { + Documents int64 `json:"Documents"` + Revisions int64 `json:"Revisions"` + Attachments int64 `json:"Attachments"` + UniqueAttachments int64 `json:"UniqueAttachments"` + Alerts int64 `json:"Alerts"` + Rehabs int32 `json:"Rehabs"` + PerformanceHints int64 `json:"PerformanceHints"` + ReplicationFactor int32 `json:"ReplicationFactor"` +} + +type databaseStatistics struct { + DocPutsPerSec float64 
`json:"DocPutsPerSec"` + MapIndexIndexesPerSec float64 `json:"MapIndexIndexesPerSec"` + MapReduceIndexMappedPerSec float64 `json:"MapReduceIndexMappedPerSec"` + MapReduceIndexReducedPerSec float64 `json:"MapReduceIndexReducedPerSec"` + RequestsPerSec float64 `json:"RequestsPerSec"` + RequestsCount int32 `json:"RequestsCount"` + RequestAverageDurationInMs float64 `json:"RequestAverageDurationInMs"` +} + +type databaseIndexesMetrics struct { + Count int64 `json:"Count"` + StaleCount int32 `json:"StaleCount"` + ErrorsCount int64 `json:"ErrorsCount"` + StaticCount int32 `json:"StaticCount"` + AutoCount int32 `json:"AutoCount"` + IdleCount int32 `json:"IdleCount"` + DisabledCount int32 `json:"DisabledCount"` + ErroredCount int32 `json:"ErroredCount"` +} + +type databaseStorageMetrics struct { + DocumentsAllocatedDataFileInMb int64 `json:"DocumentsAllocatedDataFileInMb"` + DocumentsUsedDataFileInMb int64 `json:"DocumentsUsedDataFileInMb"` + IndexesAllocatedDataFileInMb int64 `json:"IndexesAllocatedDataFileInMb"` + IndexesUsedDataFileInMb int64 `json:"IndexesUsedDataFileInMb"` + TotalAllocatedStorageFileInMb int64 `json:"TotalAllocatedStorageFileInMb"` + TotalFreeSpaceInMb int64 `json:"TotalFreeSpaceInMb"` +} + +type indexesMetricResponse struct { + Results []*perDatabaseIndexMetrics `json:"Results"` + PublicServerURL *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type perDatabaseIndexMetrics struct { + DatabaseName string `json:"DatabaseName"` + Indexes []*indexMetrics `json:"Indexes"` +} + +type indexMetrics struct { + IndexName string `json:"IndexName"` + Priority string `json:"Priority"` + State string `json:"State"` + Errors int32 `json:"Errors"` + TimeSinceLastQueryInSec *float64 `json:"TimeSinceLastQueryInSec"` + TimeSinceLastIndexingInSec *float64 `json:"TimeSinceLastIndexingInSec"` + LockMode string `json:"LockMode"` + IsInvalid bool `json:"IsInvalid"` + Status string `json:"Status"` + MappedPerSec float64 `json:"MappedPerSec"` + ReducedPerSec float64 `json:"ReducedPerSec"` + Type string `json:"Type"` + EntriesCount int32 `json:"EntriesCount"` +} + +type collectionsMetricResponse struct { + Results []*perDatabaseCollectionMetrics `json:"Results"` + PublicServerURL *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type perDatabaseCollectionMetrics struct { + DatabaseName string `json:"DatabaseName"` + Collections []*collectionMetrics `json:"Collections"` +} + +type collectionMetrics struct { + CollectionName string `json:"CollectionName"` + DocumentsCount int64 `json:"DocumentsCount"` + TotalSizeInBytes int64 `json:"TotalSizeInBytes"` + DocumentsSizeInBytes int64 `json:"DocumentsSizeInBytes"` + TombstonesSizeInBytes int64 `json:"TombstonesSizeInBytes"` + RevisionsSizeInBytes int64 `json:"RevisionsSizeInBytes"` +} diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go new file mode 100644 index 0000000000000..3da1d0190a055 --- /dev/null +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -0,0 +1,388 @@ +package ravendb + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +// Test against fully filled data +func TestRavenDBGeneratesMetricsFull(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/admin/monitoring/v1/databases": + jsonFilePath = "testdata/databases_full.json" + case 
"/admin/monitoring/v1/server": + jsonFilePath = "testdata/server_full.json" + case "/admin/monitoring/v1/indexes": + jsonFilePath = "testdata/indexes_full.json" + case "/admin/monitoring/v1/collections": + jsonFilePath = "testdata/collections_full.json" + + default: + require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + _, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + r := &RavenDB{ + URL: ts.URL, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + Log: testutil.Logger{}, + } + + require.NoError(t, r.Init()) + + acc := &testutil.Accumulator{} + + err := acc.GatherError(r.Gather) + require.NoError(t, err) + + serverFields := map[string]interface{}{ + "server_version": "5.1", + "server_full_version": "5.1.1-custom-51", + "uptime_in_sec": int64(30), + "server_process_id": 26360, + "config_server_urls": "http://127.0.0.1:8080;http://192.168.0.1:8080", + "config_tcp_server_urls": "tcp://127.0.0.1:3888;tcp://192.168.0.1:3888", + "config_public_tcp_server_urls": "tcp://2.3.4.5:3888;tcp://6.7.8.9:3888", + "backup_max_number_of_concurrent_backups": 4, + "backup_current_number_of_running_backups": 2, + "cpu_process_usage": 6.28, + "cpu_machine_usage": 41.05, + "cpu_machine_io_wait": 2.55, + "cpu_processor_count": 8, + "cpu_assigned_processor_count": 7, + "cpu_thread_pool_available_worker_threads": 32766, + "cpu_thread_pool_available_completion_port_threads": 1000, + "memory_allocated_in_mb": 235, + "memory_installed_in_mb": 16384, + "memory_physical_in_mb": 16250, + "memory_low_memory_severity": "None", + "memory_total_swap_size_in_mb": 1024, + "memory_total_swap_usage_in_mb": 456, + "memory_working_set_swap_usage_in_mb": 89, + "memory_total_dirty_in_mb": 1, + "disk_system_store_used_data_file_size_in_mb": 28, + "disk_system_store_total_data_file_size_in_mb": 32, + "disk_total_free_space_in_mb": 52078, + "disk_remaining_storage_space_percentage": 22, + "license_type": "Enterprise", + "license_expiration_left_in_sec": 25466947.5, + "license_utilized_cpu_cores": 8, + "license_max_cores": 256, + "network_tcp_active_connections": 84, + "network_concurrent_requests_count": 1, + "network_total_requests": 3, + "network_requests_per_sec": 0.03322, + "network_last_request_time_in_sec": 0.0264977, + "network_last_authorized_non_cluster_admin_request_time_in_sec": 0.04, + "certificate_server_certificate_expiration_left_in_sec": float64(104), + "certificate_well_known_admin_certificates": "a909502dd82ae41433e6f83886b00d4277a32a7b;4444444444444444444444444444444444444444", + "cluster_node_state": "Leader", + "cluster_current_term": 28, + "cluster_index": 104, + "databases_total_count": 25, + "databases_loaded_count": 2, + } + + serverTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "cluster_id": "6b535a18-558f-4e53-a479-a514efc16aab", + "public_server_url": "http://raven1:8080", + } + + defaultTime := time.Unix(0, 0) + + dbFields := map[string]interface{}{ + "uptime_in_sec": float64(1396), + "time_since_last_backup_in_sec": 104.3, + "counts_documents": 425189, + "counts_revisions": 429605, + "counts_attachments": 17, + "counts_unique_attachments": 16, + "counts_alerts": 2, + "counts_rehabs": 3, + "counts_performance_hints": 5, + "counts_replication_factor": 2, + "statistics_doc_puts_per_sec": 23.4, + "statistics_map_index_indexes_per_sec": 82.5, + "statistics_map_reduce_index_mapped_per_sec": 50.3, + 
"statistics_map_reduce_index_reduced_per_sec": 85.2, + "statistics_requests_per_sec": 22.5, + "statistics_requests_count": 809, + "statistics_request_average_duration_in_ms": 0.55, + "indexes_count": 7, + "indexes_stale_count": 1, + "indexes_errors_count": 2, + "indexes_static_count": 7, + "indexes_auto_count": 3, + "indexes_idle_count": 4, + "indexes_disabled_count": 5, + "indexes_errored_count": 6, + "storage_documents_allocated_data_file_in_mb": 1024, + "storage_documents_used_data_file_in_mb": 942, + "storage_indexes_allocated_data_file_in_mb": 464, + "storage_indexes_used_data_file_in_mb": 278, + "storage_total_allocated_storage_file_in_mb": 1496, + "storage_total_free_space_in_mb": 52074, + } + + dbTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db2", + "database_id": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "public_server_url": "http://myhost:8080", + } + + indexFields := map[string]interface{}{ + "priority": "Normal", + "state": "Normal", + "errors": 0, + "time_since_last_query_in_sec": 3.4712567, + "time_since_last_indexing_in_sec": 3.4642612, + "lock_mode": "Unlock", + "is_invalid": true, + "status": "Running", + "mapped_per_sec": 102.34, + "reduced_per_sec": 593.23, + "type": "MapReduce", + } + + indexTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "public_server_url": "http://localhost:8080", + "database_name": "db1", + "index_name": "Product/Rating", + } + + collectionFields := map[string]interface{}{ + "documents_count": 830, + "total_size_in_bytes": 2744320, + "documents_size_in_bytes": 868352, + "tombstones_size_in_bytes": 122880, + "revisions_size_in_bytes": 1753088, + } + + collectionTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "collection_name": "Orders", + "public_server_url": "http://localhost:8080", + } + + serverExpected := testutil.MustMetric("ravendb_server", serverTags, serverFields, defaultTime) + dbExpected := testutil.MustMetric("ravendb_databases", dbTags, dbFields, defaultTime) + indexExpected := testutil.MustMetric("ravendb_indexes", indexTags, indexFields, defaultTime) + collectionsExpected := testutil.MustMetric("ravendb_collections", collectionTags, collectionFields, defaultTime) + + for _, metric := range acc.GetTelegrafMetrics() { + switch metric.Name() { + case "ravendb_server": + testutil.RequireMetricEqual(t, serverExpected, metric, testutil.IgnoreTime()) + case "ravendb_databases": + testutil.RequireMetricEqual(t, dbExpected, metric, testutil.IgnoreTime()) + case "ravendb_indexes": + testutil.RequireMetricEqual(t, indexExpected, metric, testutil.IgnoreTime()) + case "ravendb_collections": + testutil.RequireMetricEqual(t, collectionsExpected, metric, testutil.IgnoreTime()) + } + } +} + +// Test against minimum filled data +func TestRavenDBGeneratesMetricsMin(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/admin/monitoring/v1/databases": + jsonFilePath = "testdata/databases_min.json" + case "/admin/monitoring/v1/server": + jsonFilePath = "testdata/server_min.json" + case "/admin/monitoring/v1/indexes": + jsonFilePath = "testdata/indexes_min.json" + case "/admin/monitoring/v1/collections": + jsonFilePath = "testdata/collections_min.json" + default: + require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + 
_, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + r := &RavenDB{ + URL: ts.URL, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + Log: testutil.Logger{}, + } + + require.NoError(t, r.Init()) + + acc := &testutil.Accumulator{} + + err := acc.GatherError(r.Gather) + require.NoError(t, err) + + serverFields := map[string]interface{}{ + "server_version": "5.1", + "server_full_version": "5.1.1-custom-51", + "uptime_in_sec": 30, + "server_process_id": 26360, + "config_server_urls": "http://127.0.0.1:8080", + "backup_max_number_of_concurrent_backups": 4, + "backup_current_number_of_running_backups": 2, + "cpu_process_usage": 6.28, + "cpu_machine_usage": 41.07, + "cpu_processor_count": 8, + "cpu_assigned_processor_count": 7, + "cpu_thread_pool_available_worker_threads": 32766, + "cpu_thread_pool_available_completion_port_threads": 1000, + "memory_allocated_in_mb": 235, + "memory_installed_in_mb": 16384, + "memory_physical_in_mb": 16250, + "memory_low_memory_severity": "Low", + "memory_total_swap_size_in_mb": 1024, + "memory_total_swap_usage_in_mb": 456, + "memory_working_set_swap_usage_in_mb": 89, + "memory_total_dirty_in_mb": 1, + "disk_system_store_used_data_file_size_in_mb": 28, + "disk_system_store_total_data_file_size_in_mb": 32, + "disk_total_free_space_in_mb": 52078, + "disk_remaining_storage_space_percentage": 22, + "license_type": "Enterprise", + "license_utilized_cpu_cores": 8, + "license_max_cores": 256, + "network_tcp_active_connections": 84, + "network_concurrent_requests_count": 1, + "network_total_requests": 3, + "network_requests_per_sec": 0.03322, + "cluster_node_state": "Leader", + "cluster_current_term": 28, + "cluster_index": 104, + "databases_total_count": 25, + "databases_loaded_count": 2, + } + + serverTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "cluster_id": "6b535a18-558f-4e53-a479-a514efc16aab", + } + + dbFields := map[string]interface{}{ + "uptime_in_sec": float64(1396), + "counts_documents": 425189, + "counts_revisions": 429605, + "counts_attachments": 17, + "counts_unique_attachments": 16, + "counts_alerts": 2, + "counts_rehabs": 3, + "counts_performance_hints": 5, + "counts_replication_factor": 2, + "statistics_doc_puts_per_sec": 23.4, + "statistics_map_index_indexes_per_sec": 82.5, + "statistics_map_reduce_index_mapped_per_sec": 50.3, + "statistics_map_reduce_index_reduced_per_sec": 85.2, + "statistics_requests_per_sec": 22.5, + "statistics_requests_count": 809, + "statistics_request_average_duration_in_ms": 0.55, + "indexes_count": 7, + "indexes_stale_count": 1, + "indexes_errors_count": 2, + "indexes_static_count": 7, + "indexes_auto_count": 3, + "indexes_idle_count": 4, + "indexes_disabled_count": 5, + "indexes_errored_count": 6, + "storage_documents_allocated_data_file_in_mb": 1024, + "storage_documents_used_data_file_in_mb": 942, + "storage_indexes_allocated_data_file_in_mb": 464, + "storage_indexes_used_data_file_in_mb": 278, + "storage_total_allocated_storage_file_in_mb": 1496, + "storage_total_free_space_in_mb": 52074, + } + + dbTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db2", + "database_id": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + } + + indexFields := map[string]interface{}{ + "priority": "Normal", + "state": "Normal", + "errors": 0, + "lock_mode": "Unlock", + "is_invalid": false, + "status": "Running", + "mapped_per_sec": 102.34, + "reduced_per_sec": 593.23, + "type": "MapReduce", + } + + indexTags := map[string]string{ + "url": ts.URL, + 
"node_tag": "A", + "database_name": "db1", + "index_name": "Product/Rating", + } + + collectionFields := map[string]interface{}{ + "documents_count": 830, + "total_size_in_bytes": 2744320, + "documents_size_in_bytes": 868352, + "tombstones_size_in_bytes": 122880, + "revisions_size_in_bytes": 1753088, + } + + collectionTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "collection_name": "Orders", + } + + defaultTime := time.Unix(0, 0) + + serverExpected := testutil.MustMetric("ravendb_server", serverTags, serverFields, defaultTime) + dbExpected := testutil.MustMetric("ravendb_databases", dbTags, dbFields, defaultTime) + indexExpected := testutil.MustMetric("ravendb_indexes", indexTags, indexFields, defaultTime) + collectionsExpected := testutil.MustMetric("ravendb_collections", collectionTags, collectionFields, defaultTime) + + for _, metric := range acc.GetTelegrafMetrics() { + switch metric.Name() { + case "ravendb_server": + testutil.RequireMetricEqual(t, serverExpected, metric, testutil.IgnoreTime()) + case "ravendb_databases": + testutil.RequireMetricEqual(t, dbExpected, metric, testutil.IgnoreTime()) + case "ravendb_indexes": + testutil.RequireMetricEqual(t, indexExpected, metric, testutil.IgnoreTime()) + case "ravendb_collections": + testutil.RequireMetricEqual(t, collectionsExpected, metric, testutil.IgnoreTime()) + } + } +} diff --git a/plugins/inputs/ravendb/testdata/collections_full.json b/plugins/inputs/ravendb/testdata/collections_full.json new file mode 100644 index 0000000000000..db91e90868d9b --- /dev/null +++ b/plugins/inputs/ravendb/testdata/collections_full.json @@ -0,0 +1,19 @@ +{ + "PublicServerUrl": "http://localhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Collections": [ + { + "CollectionName": "Orders", + "DocumentsCount": 830, + "TotalSizeInBytes": 2744320, + "DocumentsSizeInBytes": 868352, + "TombstonesSizeInBytes": 122880, + "RevisionsSizeInBytes": 1753088 + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/collections_min.json b/plugins/inputs/ravendb/testdata/collections_min.json new file mode 100644 index 0000000000000..edd636d21e202 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/collections_min.json @@ -0,0 +1,19 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Collections": [ + { + "CollectionName": "Orders", + "DocumentsCount": 830, + "TotalSizeInBytes": 2744320, + "DocumentsSizeInBytes": 868352, + "TombstonesSizeInBytes": 122880, + "RevisionsSizeInBytes": 1753088 + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/databases_full.json b/plugins/inputs/ravendb/testdata/databases_full.json new file mode 100644 index 0000000000000..1c74568812575 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/databases_full.json @@ -0,0 +1,49 @@ +{ + "PublicServerUrl": "http://myhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db2", + "DatabaseId": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "UptimeInSec": 1396, + "TimeSinceLastBackupInSec": 104.3, + "Counts": { + "Documents": 425189, + "Revisions": 429605, + "Attachments": 17, + "UniqueAttachments": 16, + "Alerts": 2, + "Rehabs": 3, + "PerformanceHints": 5, + "ReplicationFactor": 2 + }, + "Statistics": { + "DocPutsPerSec": 23.4, + "MapIndexIndexesPerSec": 82.5, + "MapReduceIndexMappedPerSec": 50.3, + "MapReduceIndexReducedPerSec": 85.2, + "RequestsPerSec": 22.5, + "RequestsCount": 809, + "RequestAverageDurationInMs": 0.55 + }, + "Indexes": { + "Count": 7, + 
"StaleCount": 1, + "ErrorsCount": 2, + "StaticCount": 7, + "AutoCount": 3, + "IdleCount": 4, + "DisabledCount": 5, + "ErroredCount": 6 + }, + "Storage": { + "DocumentsAllocatedDataFileInMb": 1024, + "DocumentsUsedDataFileInMb": 942, + "IndexesAllocatedDataFileInMb": 464, + "IndexesUsedDataFileInMb": 278, + "TotalAllocatedStorageFileInMb": 1496, + "TotalFreeSpaceInMb": 52074 + } + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/databases_min.json b/plugins/inputs/ravendb/testdata/databases_min.json new file mode 100644 index 0000000000000..48a1ccbb6b7ad --- /dev/null +++ b/plugins/inputs/ravendb/testdata/databases_min.json @@ -0,0 +1,49 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db2", + "DatabaseId": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "UptimeInSec": 1396, + "TimeSinceLastBackupInSec": null, + "Counts": { + "Documents": 425189, + "Revisions": 429605, + "Attachments": 17, + "UniqueAttachments": 16, + "Alerts": 2, + "Rehabs": 3, + "PerformanceHints": 5, + "ReplicationFactor": 2 + }, + "Statistics": { + "DocPutsPerSec": 23.4, + "MapIndexIndexesPerSec": 82.5, + "MapReduceIndexMappedPerSec": 50.3, + "MapReduceIndexReducedPerSec": 85.2, + "RequestsPerSec": 22.5, + "RequestsCount": 809, + "RequestAverageDurationInMs": 0.55 + }, + "Indexes": { + "Count": 7, + "StaleCount": 1, + "ErrorsCount": 2, + "StaticCount": 7, + "AutoCount": 3, + "IdleCount": 4, + "DisabledCount": 5, + "ErroredCount": 6 + }, + "Storage": { + "DocumentsAllocatedDataFileInMb": 1024, + "DocumentsUsedDataFileInMb": 942, + "IndexesAllocatedDataFileInMb": 464, + "IndexesUsedDataFileInMb": 278, + "TotalAllocatedStorageFileInMb": 1496, + "TotalFreeSpaceInMb": 52074 + } + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/indexes_full.json b/plugins/inputs/ravendb/testdata/indexes_full.json new file mode 100644 index 0000000000000..d67ded7d18800 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/indexes_full.json @@ -0,0 +1,25 @@ +{ + "PublicServerUrl": "http://localhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Indexes": [ + { + "IndexName": "Product/Rating", + "Priority": "Normal", + "State": "Normal", + "Errors": 0, + "TimeSinceLastQueryInSec": 3.4712567, + "TimeSinceLastIndexingInSec": 3.4642612, + "LockMode": "Unlock", + "IsInvalid": true, + "Status": "Running", + "MappedPerSec": 102.34, + "ReducedPerSec": 593.23, + "Type": "MapReduce" + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/indexes_min.json b/plugins/inputs/ravendb/testdata/indexes_min.json new file mode 100644 index 0000000000000..493bda8b7e799 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/indexes_min.json @@ -0,0 +1,25 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Indexes": [ + { + "IndexName": "Product/Rating", + "Priority": "Normal", + "State": "Normal", + "Errors": 0, + "TimeSinceLastQueryInSec": null, + "TimeSinceLastIndexingInSec": null, + "LockMode": "Unlock", + "IsInvalid": false, + "Status": "Running", + "MappedPerSec": 102.34, + "ReducedPerSec": 593.23, + "Type": "MapReduce" + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/server_full.json b/plugins/inputs/ravendb/testdata/server_full.json new file mode 100644 index 0000000000000..edfbbbf7940dc --- /dev/null +++ b/plugins/inputs/ravendb/testdata/server_full.json @@ -0,0 +1,73 @@ +{ + "ServerVersion": "5.1", + "ServerFullVersion": "5.1.1-custom-51", + "UpTimeInSec": 30, + "ServerProcessId": 26360, + "Config": { + "ServerUrls": [ + 
"http://127.0.0.1:8080", + "http://192.168.0.1:8080" + ], + "PublicServerUrl": "http://raven1:8080", + "TcpServerUrls": ["tcp://127.0.0.1:3888", "tcp://192.168.0.1:3888"], + "PublicTcpServerUrls": ["tcp://2.3.4.5:3888", "tcp://6.7.8.9:3888"] + }, + "Backup": { + "CurrentNumberOfRunningBackups": 2, + "MaxNumberOfConcurrentBackups": 4 + }, + "Cpu": { + "ProcessUsage": 6.28, + "MachineUsage": 41.05, + "MachineIoWait": 2.55, + "ProcessorCount": 8, + "AssignedProcessorCount": 7, + "ThreadPoolAvailableWorkerThreads": 32766, + "ThreadPoolAvailableCompletionPortThreads": 1000 + }, + "Memory": { + "AllocatedMemoryInMb": 235, + "PhysicalMemoryInMb": 16250, + "InstalledMemoryInMb": 16384, + "LowMemorySeverity": "None", + "TotalSwapSizeInMb": 1024, + "TotalSwapUsageInMb": 456, + "WorkingSetSwapUsageInMb": 89, + "TotalDirtyInMb": 1 + }, + "Disk": { + "SystemStoreUsedDataFileSizeInMb": 28, + "SystemStoreTotalDataFileSizeInMb": 32, + "TotalFreeSpaceInMb": 52078, + "RemainingStorageSpacePercentage": 22 + }, + "License": { + "Type": "Enterprise", + "ExpirationLeftInSec": 25466947.5, + "UtilizedCpuCores": 8, + "MaxCores": 256 + }, + "Network": { + "TcpActiveConnections": 84, + "ConcurrentRequestsCount": 1, + "TotalRequests": 3, + "RequestsPerSec": 0.03322, + "LastRequestTimeInSec": 0.0264977, + "LastAuthorizedNonClusterAdminRequestTimeInSec": 0.04 + }, + "Certificate": { + "ServerCertificateExpirationLeftInSec": 104, + "WellKnownAdminCertificates": ["a909502dd82ae41433e6f83886b00d4277a32a7b", "4444444444444444444444444444444444444444"] + }, + "Cluster": { + "NodeTag": "A", + "NodeState": "Leader", + "CurrentTerm": 28, + "Index": 104, + "Id": "6b535a18-558f-4e53-a479-a514efc16aab" + }, + "Databases": { + "TotalCount": 25, + "LoadedCount": 2 + } +} diff --git a/plugins/inputs/ravendb/testdata/server_min.json b/plugins/inputs/ravendb/testdata/server_min.json new file mode 100644 index 0000000000000..e22bd03d4460d --- /dev/null +++ b/plugins/inputs/ravendb/testdata/server_min.json @@ -0,0 +1,72 @@ +{ + "ServerVersion": "5.1", + "ServerFullVersion": "5.1.1-custom-51", + "UpTimeInSec": 30, + "ServerProcessId": 26360, + "Config": { + "ServerUrls": [ + "http://127.0.0.1:8080" + ], + "PublicServerUrl": null, + "TcpServerUrls": null, + "PublicTcpServerUrls": null + }, + "Backup": { + "CurrentNumberOfRunningBackups": 2, + "MaxNumberOfConcurrentBackups": 4 + }, + "Cpu": { + "ProcessUsage": 6.28, + "MachineUsage": 41.07, + "MachineIoWait": null, + "ProcessorCount": 8, + "AssignedProcessorCount": 7, + "ThreadPoolAvailableWorkerThreads": 32766, + "ThreadPoolAvailableCompletionPortThreads": 1000 + }, + "Memory": { + "AllocatedMemoryInMb": 235, + "PhysicalMemoryInMb": 16250, + "InstalledMemoryInMb": 16384, + "LowMemorySeverity": "Low", + "TotalSwapSizeInMb": 1024, + "TotalSwapUsageInMb": 456, + "WorkingSetSwapUsageInMb": 89, + "TotalDirtyInMb": 1 + }, + "Disk": { + "SystemStoreUsedDataFileSizeInMb": 28, + "SystemStoreTotalDataFileSizeInMb": 32, + "TotalFreeSpaceInMb": 52078, + "RemainingStorageSpacePercentage": 22 + }, + "License": { + "Type": "Enterprise", + "ExpirationLeftInSec": null, + "UtilizedCpuCores": 8, + "MaxCores": 256 + }, + "Network": { + "TcpActiveConnections": 84, + "ConcurrentRequestsCount": 1, + "TotalRequests": 3, + "RequestsPerSec": 0.03322, + "LastRequestTimeInSec": null, + "LastAuthorizedNonClusterAdminRequestTimeInSec": null + }, + "Certificate": { + "ServerCertificateExpirationLeftInSec": null, + "WellKnownAdminCertificates": null + }, + "Cluster": { + "NodeTag": "A", + "NodeState": "Leader", + 
"CurrentTerm": 28, + "Index": 104, + "Id": "6b535a18-558f-4e53-a479-a514efc16aab" + }, + "Databases": { + "TotalCount": 25, + "LoadedCount": 2 + } +} diff --git a/plugins/inputs/redfish/README.md b/plugins/inputs/redfish/README.md index a22b9d3141741..cabf7e088047b 100644 --- a/plugins/inputs/redfish/README.md +++ b/plugins/inputs/redfish/README.md @@ -34,6 +34,7 @@ Telegraf minimum version: Telegraf 1.15.0 - redfish_thermal_temperatures - tags: - source + - member_id - address - name - datacenter (available only if location data is found) @@ -53,6 +54,7 @@ Telegraf minimum version: Telegraf 1.15.0 + redfish_thermal_fans - tags: - source + - member_id - address - name - datacenter (available only if location data is found) @@ -73,6 +75,7 @@ Telegraf minimum version: Telegraf 1.15.0 - tags: - source - address + - member_id - name - datacenter (available only if location data is found) - rack (available only if location data is found) @@ -92,6 +95,7 @@ Telegraf minimum version: Telegraf 1.15.0 - tags: - source - address + - member_id - name - datacenter (available only if location data is found) - rack (available only if location data is found) @@ -110,18 +114,18 @@ Telegraf minimum version: Telegraf 1.15.0 ### Example Output ``` -redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 -redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" 
power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,member_id="2",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,member_id="3",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,member_id="2",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000
+redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,member_id="2",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,member_id="12",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 ``` diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 54d1d15b8c097..dcf26b192c651 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -3,7 +3,7 @@ package redfish import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -43,7 +43,7 @@ type Redfish struct { Address string `toml:"address"` Username string `toml:"username"` Password string `toml:"password"` - ComputerSystemId string `toml:"computer_system_id"` + ComputerSystemID string `toml:"computer_system_id"` Timeout config.Duration `toml:"timeout"` client http.Client @@ -73,6 +73,7 @@ type Chassis struct { type Power struct { PowerSupplies []struct { Name string + MemberID string PowerInputWatts *float64 PowerCapacityWatts *float64 PowerOutputWatts *float64 @@ -82,6 +83,7 @@ type Power struct { } Voltages []struct { Name string + MemberID string ReadingVolts *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -94,6 +96,7 @@ type Power struct { type Thermal struct { Fans []struct { Name string + MemberID string Reading *int64 ReadingUnits *string UpperThresholdCritical *int64 @@ -104,6 +107,7 @@ type Thermal struct { } Temperatures []struct { Name string + MemberID string ReadingCelsius *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -146,7 +150,7 @@ func (r *Redfish) Init() error { return fmt.Errorf("did not provide username and password") } - if r.ComputerSystemId == "" { + if r.ComputerSystemID == "" { return fmt.Errorf("did not provide the computer system ID of the resource") } @@ -181,6 +185,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { req.SetBasicAuth(r.Username, r.Password) req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") + req.Header.Set("OData-Version", "4.0") resp, err := r.client.Do(req) if err != nil { return err } @@ -188,12 +193,13 @@ func (r *Redfish) getData(url string, payload interface{}) error { defer resp.Body.Close() if resp.StatusCode != 200 { - return fmt.Errorf("received status code %d (%s), expected 200", + return fmt.Errorf("received
status code %d (%s) for address %s, expected 200", resp.StatusCode, - http.StatusText(resp.StatusCode)) + http.StatusText(resp.StatusCode), + r.Address) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -252,7 +258,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { address = r.baseURL.Host } - system, err := r.getComputerSystem(r.ComputerSystemId) + system, err := r.getComputerSystem(r.ComputerSystemID) if err != nil { return err } @@ -270,6 +276,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Temperatures { tags := map[string]string{} + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -294,6 +301,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Fans { tags := map[string]string{} fields := make(map[string]interface{}) + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -325,6 +333,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.PowerSupplies { tags := map[string]string{} + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -348,6 +357,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.Voltages { tags := map[string]string{} + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 8821b3d97557f..4cbbb045302c1 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -8,15 +8,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestDellApis(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -43,11 +42,12 @@ func TestDellApis(t *testing.T) { address, _, err := net.SplitHostPort(u.Host) require.NoError(t, err) - expected_metrics := []telegraf.Metric{ + expectedMetrics := []telegraf.Metric{ testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ "name": "CPU1 Temp", + "member_id": "iDRAC.Embedded.1#CPU1Temp", "source": "tpa-hostname", "address": address, "datacenter": "", @@ -71,6 +71,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan1A", + "member_id": "0x17||Fan.Embedded.1A", "address": address, "datacenter": "", "health": "OK", @@ -91,6 +92,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan1B", + "member_id": "0x17||Fan.Embedded.1B", "address": address, "datacenter": "", "health": "OK", @@ -111,6 +113,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan2A", + "member_id": "0x17||Fan.Embedded.2A", "address": address, "datacenter": "", "health": "OK", @@ -131,6 +134,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan2B", + "member_id": "0x17||Fan.Embedded.2B", "address": address, "datacenter": "", "health": "OK", @@ -151,6 +155,7 @@ func TestDellApis(t 
*testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan3A", + "member_id": "0x17||Fan.Embedded.3A", "address": address, "datacenter": "", "health": "OK", @@ -171,6 +176,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan3B", + "member_id": "0x17||Fan.Embedded.3B", "address": address, "datacenter": "", "health": "OK", @@ -191,6 +197,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan4A", + "member_id": "0x17||Fan.Embedded.4A", "address": address, "datacenter": "", "health": "OK", @@ -211,6 +218,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan4B", + "member_id": "0x17||Fan.Embedded.4B", "address": address, "datacenter": "", "health": "OK", @@ -231,6 +239,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan5A", + "member_id": "0x17||Fan.Embedded.5A", "address": address, "datacenter": "", "health": "OK", @@ -251,6 +260,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan5B", + "member_id": "0x17||Fan.Embedded.5B", "address": address, "datacenter": "", "health": "OK", @@ -271,6 +281,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan6A", + "member_id": "0x17||Fan.Embedded.6A", "address": address, "datacenter": "", "health": "OK", @@ -291,6 +302,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan6B", + "member_id": "0x17||Fan.Embedded.6B", "address": address, "datacenter": "", "health": "OK", @@ -311,6 +323,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan7A", + "member_id": "0x17||Fan.Embedded.7A", "address": address, "datacenter": "", "health": "OK", @@ -331,6 +344,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan7B", + "member_id": "0x17||Fan.Embedded.7B", "address": address, "datacenter": "", "health": "OK", @@ -351,6 +365,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan8A", + "member_id": "0x17||Fan.Embedded.8A", "address": address, "datacenter": "", "health": "OK", @@ -371,6 +386,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan8B", + "member_id": "0x17||Fan.Embedded.8B", "address": address, "datacenter": "", "health": "OK", @@ -391,6 +407,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "PS1 Status", + "member_id": "PSU.Slot.1", "address": address, "datacenter": "", "health": "OK", @@ -412,6 +429,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board DIMM PG", + "member_id": "iDRAC.Embedded.1#SystemBoardDIMMPG", "address": address, "datacenter": "", "health": "OK", @@ -430,6 +448,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board NDC PG", + "member_id": "iDRAC.Embedded.1#SystemBoardNDCPG", "address": address, "datacenter": "", "health": "OK", @@ -449,6 +468,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board PS1 PG FAIL", + "member_id": "iDRAC.Embedded.1#SystemBoardPS1PGFAIL", "address": address, "datacenter": "", "health": 
"OK", @@ -467,22 +487,20 @@ func TestDellApis(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err = plugin.Gather(&acc) require.NoError(t, err) require.True(t, acc.HasMeasurement("redfish_thermal_temperatures")) - testutil.RequireMetricsEqual(t, expected_metrics, acc.GetTelegrafMetrics(), + testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestHPApis(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -509,15 +527,16 @@ func TestHPApis(t *testing.T) { address, _, err := net.SplitHostPort(u.Host) require.NoError(t, err) - expected_metrics_hp := []telegraf.Metric{ + expectedMetricsHp := []telegraf.Metric{ testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ - "name": "01-Inlet Ambient", - "source": "tpa-hostname", - "address": address, - "health": "OK", - "state": "Enabled", + "name": "01-Inlet Ambient", + "member_id": "0", + "source": "tpa-hostname", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_celsius": 19.0, @@ -529,11 +548,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ - "name": "44-P/S 2 Zone", - "source": "tpa-hostname", - "address": address, - "health": "OK", - "state": "Enabled", + "name": "44-P/S 2 Zone", + "source": "tpa-hostname", + "member_id": "42", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_celsius": 34.0, @@ -545,11 +565,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 1", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 1", + "member_id": "0", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -559,11 +580,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 2", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 2", + "member_id": "1", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -573,11 +595,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 3", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 3", + "member_id": "2", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -587,11 +610,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_power_powersupplies", map[string]string{ - "source": "tpa-hostname", - "name": "HpeServerPowerSupply", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "HpeServerPowerSupply", + "member_id": "0", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "power_capacity_watts": 800.0, @@ -603,11 +627,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( 
"redfish_power_powersupplies", map[string]string{ - "source": "tpa-hostname", - "name": "HpeServerPowerSupply", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "HpeServerPowerSupply", + "member_id": "1", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "power_capacity_watts": 800.0, @@ -618,19 +643,19 @@ func TestHPApis(t *testing.T) { ), } - hp_plugin := &Redfish{ + hpPlugin := &Redfish{ Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "1", + ComputerSystemID: "1", } - hp_plugin.Init() - var hp_acc testutil.Accumulator + require.NoError(t, hpPlugin.Init()) + var hpAcc testutil.Accumulator - err = hp_plugin.Gather(&hp_acc) + err = hpPlugin.Gather(&hpAcc) require.NoError(t, err) - require.True(t, hp_acc.HasMeasurement("redfish_thermal_temperatures")) - testutil.RequireMetricsEqual(t, expected_metrics_hp, hp_acc.GetTelegrafMetrics(), + require.True(t, hpAcc.HasMeasurement("redfish_thermal_temperatures")) + testutil.RequireMetricsEqual(t, expectedMetricsHp, hpAcc.GetTelegrafMetrics(), testutil.IgnoreTime()) } @@ -642,26 +667,8 @@ func checkAuth(r *http.Request, username, password string) bool { return user == username && pass == password } -func TestConnection(t *testing.T) { - - r := &Redfish{ - Address: "http://127.0.0.1", - Username: "test", - Password: "test", - ComputerSystemId: "System.Embedded.1", - } - - var acc testutil.Accumulator - r.Init() - err := r.Gather(&acc) - require.Error(t, err) - require.Contains(t, err.Error(), "connect: connection refused") -} - func TestInvalidUsernameorPassword(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -680,19 +687,18 @@ func TestInvalidUsernameorPassword(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } var acc testutil.Accumulator - r.Init() - err := r.Gather(&acc) - require.Error(t, err) - require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.NoError(t, r.Init()) + u, err := url.Parse(ts.URL) + require.NoError(t, err) + err = r.Gather(&acc) + require.EqualError(t, err, "received status code 401 (Unauthorized) for address http://"+u.Host+", expected 200") } func TestNoUsernameorPasswordConfiguration(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -709,7 +715,7 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { r := &Redfish{ Address: ts.URL, - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } err := r.Init() @@ -718,7 +724,6 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { } func TestInvalidDellJSON(t *testing.T) { - tests := []struct { name string thermalfilename string @@ -757,7 +762,6 @@ func TestInvalidDellJSON(t *testing.T) { } for _, tt := range tests { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -782,10 +786,10 @@ func TestInvalidDellJSON(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } - plugin.Init() + 
require.NoError(t, plugin.Init()) var acc testutil.Accumulator err := plugin.Gather(&acc) @@ -795,7 +799,6 @@ func TestInvalidDellJSON(t *testing.T) { } func TestInvalidHPJSON(t *testing.T) { - tests := []struct { name string thermalfilename string @@ -828,7 +831,6 @@ func TestInvalidHPJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -853,10 +855,10 @@ func TestInvalidHPJSON(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.2", + ComputerSystemID: "System.Embedded.2", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err := plugin.Gather(&acc) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index c8f343b262aca..bd89ea75346b2 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -10,15 +10,21 @@ ## e.g. ## tcp://localhost:6379 ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis.sock ## ## If no servers are specified, then localhost is used as the host. ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## Optional. Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" @@ -63,7 +69,7 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - maxmemory_policy(string) - mem_fragmentation_ratio(float, number) - **Persistance** + **Persistence** - loading(int,flag) - rdb_changes_since_last_save(int, number) - rdb_bgsave_in_progress(int, flag) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 3a76a351c05de..b66d4ea41d36b 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "net/url" + "reflect" "regexp" "strconv" "strings" @@ -31,8 +32,8 @@ type Redis struct { Log telegraf.Logger - clients []Client - initialized bool + clients []Client + connected bool } type Client interface { @@ -46,6 +47,117 @@ type RedisClient struct { tags map[string]string } +// RedisFieldTypes defines the types expected for each of the fields redis reports on +type RedisFieldTypes struct { + ActiveDefragHits int64 `json:"active_defrag_hits"` + ActiveDefragKeyHits int64 `json:"active_defrag_key_hits"` + ActiveDefragKeyMisses int64 `json:"active_defrag_key_misses"` + ActiveDefragMisses int64 `json:"active_defrag_misses"` + ActiveDefragRunning int64 `json:"active_defrag_running"` + AllocatorActive int64 `json:"allocator_active"` + AllocatorAllocated int64 `json:"allocator_allocated"` + AllocatorFragBytes float64 `json:"allocator_frag_bytes"` // for historical reasons this was left as float although redis reports it as an int + AllocatorFragRatio float64 `json:"allocator_frag_ratio"` + AllocatorResident int64 `json:"allocator_resident"` + AllocatorRssBytes int64 `json:"allocator_rss_bytes"` + AllocatorRssRatio float64 `json:"allocator_rss_ratio"` + AofCurrentRewriteTimeSec int64 
`json:"aof_current_rewrite_time_sec"` + AofEnabled int64 `json:"aof_enabled"` + AofLastBgrewriteStatus string `json:"aof_last_bgrewrite_status"` + AofLastCowSize int64 `json:"aof_last_cow_size"` + AofLastRewriteTimeSec int64 `json:"aof_last_rewrite_time_sec"` + AofLastWriteStatus string `json:"aof_last_write_status"` + AofRewriteInProgress int64 `json:"aof_rewrite_in_progress"` + AofRewriteScheduled int64 `json:"aof_rewrite_scheduled"` + BlockedClients int64 `json:"blocked_clients"` + ClientRecentMaxInputBuffer int64 `json:"client_recent_max_input_buffer"` + ClientRecentMaxOutputBuffer int64 `json:"client_recent_max_output_buffer"` + Clients int64 `json:"clients"` + ClientsInTimeoutTable int64 `json:"clients_in_timeout_table"` + ClusterEnabled int64 `json:"cluster_enabled"` + ConnectedSlaves int64 `json:"connected_slaves"` + EvictedKeys int64 `json:"evicted_keys"` + ExpireCycleCPUMilliseconds int64 `json:"expire_cycle_cpu_milliseconds"` + ExpiredKeys int64 `json:"expired_keys"` + ExpiredStalePerc float64 `json:"expired_stale_perc"` + ExpiredTimeCapReachedCount int64 `json:"expired_time_cap_reached_count"` + InstantaneousInputKbps float64 `json:"instantaneous_input_kbps"` + InstantaneousOpsPerSec int64 `json:"instantaneous_ops_per_sec"` + InstantaneousOutputKbps float64 `json:"instantaneous_output_kbps"` + IoThreadedReadsProcessed int64 `json:"io_threaded_reads_processed"` + IoThreadedWritesProcessed int64 `json:"io_threaded_writes_processed"` + KeyspaceHits int64 `json:"keyspace_hits"` + KeyspaceMisses int64 `json:"keyspace_misses"` + LatestForkUsec int64 `json:"latest_fork_usec"` + LazyfreePendingObjects int64 `json:"lazyfree_pending_objects"` + Loading int64 `json:"loading"` + LruClock int64 `json:"lru_clock"` + MasterReplOffset int64 `json:"master_repl_offset"` + MaxMemory int64 `json:"maxmemory"` + MaxMemoryPolicy string `json:"maxmemory_policy"` + MemAofBuffer int64 `json:"mem_aof_buffer"` + MemClientsNormal int64 `json:"mem_clients_normal"` + MemClientsSlaves int64 `json:"mem_clients_slaves"` + MemFragmentationBytes int64 `json:"mem_fragmentation_bytes"` + MemFragmentationRatio float64 `json:"mem_fragmentation_ratio"` + MemNotCountedForEvict int64 `json:"mem_not_counted_for_evict"` + MemReplicationBacklog int64 `json:"mem_replication_backlog"` + MigrateCachedSockets int64 `json:"migrate_cached_sockets"` + ModuleForkInProgress int64 `json:"module_fork_in_progress"` + ModuleForkLastCowSize int64 `json:"module_fork_last_cow_size"` + NumberOfCachedScripts int64 `json:"number_of_cached_scripts"` + PubsubChannels int64 `json:"pubsub_channels"` + PubsubPatterns int64 `json:"pubsub_patterns"` + RdbBgsaveInProgress int64 `json:"rdb_bgsave_in_progress"` + RdbChangesSinceLastSave int64 `json:"rdb_changes_since_last_save"` + RdbCurrentBgsaveTimeSec int64 `json:"rdb_current_bgsave_time_sec"` + RdbLastBgsaveStatus string `json:"rdb_last_bgsave_status"` + RdbLastBgsaveTimeSec int64 `json:"rdb_last_bgsave_time_sec"` + RdbLastCowSize int64 `json:"rdb_last_cow_size"` + RdbLastSaveTime int64 `json:"rdb_last_save_time"` + RdbLastSaveTimeElapsed int64 `json:"rdb_last_save_time_elapsed"` + RedisVersion string `json:"redis_version"` + RejectedConnections int64 `json:"rejected_connections"` + ReplBacklogActive int64 `json:"repl_backlog_active"` + ReplBacklogFirstByteOffset int64 `json:"repl_backlog_first_byte_offset"` + ReplBacklogHistlen int64 `json:"repl_backlog_histlen"` + ReplBacklogSize int64 `json:"repl_backlog_size"` + RssOverheadBytes int64 `json:"rss_overhead_bytes"` + RssOverheadRatio float64 
`json:"rss_overhead_ratio"` + SecondReplOffset int64 `json:"second_repl_offset"` + SlaveExpiresTrackedKeys int64 `json:"slave_expires_tracked_keys"` + SyncFull int64 `json:"sync_full"` + SyncPartialErr int64 `json:"sync_partial_err"` + SyncPartialOk int64 `json:"sync_partial_ok"` + TotalCommandsProcessed int64 `json:"total_commands_processed"` + TotalConnectionsReceived int64 `json:"total_connections_received"` + TotalNetInputBytes int64 `json:"total_net_input_bytes"` + TotalNetOutputBytes int64 `json:"total_net_output_bytes"` + TotalReadsProcessed int64 `json:"total_reads_processed"` + TotalSystemMemory int64 `json:"total_system_memory"` + TotalWritesProcessed int64 `json:"total_writes_processed"` + TrackingClients int64 `json:"tracking_clients"` + TrackingTotalItems int64 `json:"tracking_total_items"` + TrackingTotalKeys int64 `json:"tracking_total_keys"` + TrackingTotalPrefixes int64 `json:"tracking_total_prefixes"` + UnexpectedErrorReplies int64 `json:"unexpected_error_replies"` + Uptime int64 `json:"uptime"` + UsedCPUSys float64 `json:"used_cpu_sys"` + UsedCPUSysChildren float64 `json:"used_cpu_sys_children"` + UsedCPUUser float64 `json:"used_cpu_user"` + UsedCPUUserChildren float64 `json:"used_cpu_user_children"` + UsedMemory int64 `json:"used_memory"` + UsedMemoryDataset int64 `json:"used_memory_dataset"` + UsedMemoryDatasetPerc float64 `json:"used_memory_dataset_perc"` + UsedMemoryLua int64 `json:"used_memory_lua"` + UsedMemoryOverhead int64 `json:"used_memory_overhead"` + UsedMemoryPeak int64 `json:"used_memory_peak"` + UsedMemoryPeakPerc float64 `json:"used_memory_peak_perc"` + UsedMemoryRss int64 `json:"used_memory_rss"` + UsedMemoryScripts int64 `json:"used_memory_scripts"` + UsedMemoryStartup int64 `json:"used_memory_startup"` +} + func (r *RedisClient) Do(returnType string, args ...interface{}) (interface{}, error) { rawVal := r.client.Do(args...) @@ -89,9 +201,13 @@ var sampleConfig = ` ## Optional. Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" @@ -118,8 +234,18 @@ var Tracking = map[string]string{ "role": "replication_role", } -func (r *Redis) init(acc telegraf.Accumulator) error { - if r.initialized { +func (r *Redis) Init() error { + for _, command := range r.Commands { + if command.Type != "string" && command.Type != "integer" && command.Type != "float" { + return fmt.Errorf(`unknown result type: expected one of "string", "integer", "float"; got %q`, command.Type) + } + } + + return nil +} + +func (r *Redis) connect() error { + if r.connected { return nil } @@ -187,15 +313,15 @@ func (r *Redis) init(acc telegraf.Accumulator) error { } } - r.initialized = true + r.connected = true return nil } // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
func (r *Redis) Gather(acc telegraf.Accumulator) error { - if !r.initialized { - err := r.init(acc) + if !r.connected { + err := r.connect() if err != nil { return err } @@ -221,6 +347,10 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err for _, command := range r.Commands { val, err := client.Do(command.Type, command.Command...) if err != nil { + if strings.Contains(err.Error(), "unexpected type=") { + return fmt.Errorf("could not get command result: %s", err) + } + return err } @@ -249,7 +379,7 @@ func gatherInfoOutput( tags map[string]string, ) error { var section string - var keyspace_hits, keyspace_misses int64 + var keyspaceHits, keyspaceMisses int64 scanner := bufio.NewScanner(rdr) fields := make(map[string]interface{}) @@ -271,7 +401,7 @@ func gatherInfoOutput( if len(parts) < 2 { continue } - name := string(parts[0]) + name := parts[0] if section == "Server" { if name != "lru_clock" && name != "uptime_in_seconds" && name != "redis_version" { @@ -294,7 +424,7 @@ func gatherInfoOutput( metric, ok := Tracking[name] if !ok { if section == "Keyspace" { - kline := strings.TrimSpace(string(parts[1])) + kline := strings.TrimSpace(parts[1]) gatherKeyspaceLine(name, kline, acc, tags) continue } @@ -321,9 +451,9 @@ func gatherInfoOutput( if ival, err := strconv.ParseInt(val, 10, 64); err == nil { switch name { case "keyspace_hits": - keyspace_hits = ival + keyspaceHits = ival case "keyspace_misses": - keyspace_misses = ival + keyspaceMisses = ival case "rdb_last_save_time": // influxdb can't calculate this, so we have to do it fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival @@ -347,11 +477,17 @@ func gatherInfoOutput( fields[metric] = val } - var keyspace_hitrate float64 = 0.0 - if keyspace_hits != 0 || keyspace_misses != 0 { - keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) + var keyspaceHitrate float64 + if keyspaceHits != 0 || keyspaceMisses != 0 { + keyspaceHitrate = float64(keyspaceHits) / float64(keyspaceHits+keyspaceMisses) } - fields["keyspace_hitrate"] = keyspace_hitrate + fields["keyspace_hitrate"] = keyspaceHitrate + + o := RedisFieldTypes{} + + setStructFieldsFromObject(fields, &o) + setExistingFieldsFromStruct(fields, &o) + acc.AddFields("redis", fields, tags) return nil } @@ -364,12 +500,12 @@ func gatherKeyspaceLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { if strings.Contains(line, "keys=") { fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } tags["database"] = name @@ -393,7 +529,7 @@ func gatherCommandstateLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { if !strings.HasPrefix(name, "cmdstat") { return @@ -401,7 +537,7 @@ func gatherCommandstateLine( fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } tags["command"] = strings.TrimPrefix(name, "cmdstat_") @@ -438,11 +574,11 @@ func gatherReplicationLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } @@ -479,3 +615,115 @@ func init() { return &Redis{} }) } + +func 
setExistingFieldsFromStruct(fields map[string]interface{}, o *RedisFieldTypes) { + val := reflect.ValueOf(o).Elem() + typ := val.Type() + + for key := range fields { + if _, exists := fields[key]; exists { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + jsonFieldName := f.Tag.Get("json") + if jsonFieldName == key { + fields[key] = val.Field(i).Interface() + break + } + } + } + } +} + +func setStructFieldsFromObject(fields map[string]interface{}, o *RedisFieldTypes) { + val := reflect.ValueOf(o).Elem() + typ := val.Type() + + for key, value := range fields { + if _, exists := fields[key]; exists { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + jsonFieldName := f.Tag.Get("json") + if jsonFieldName == key { + structFieldValue := val.Field(i) + structFieldValue.Set(coerceType(value, structFieldValue.Type())) + break + } + } + } + } +} + +func coerceType(value interface{}, typ reflect.Type) reflect.Value { + switch sourceType := value.(type) { + case bool: + switch typ.Kind() { + case reflect.String: + if sourceType { + value = "true" + } else { + value = "false" + } + case reflect.Int64: + if sourceType { + value = int64(1) + } else { + value = int64(0) + } + case reflect.Float64: + if sourceType { + value = float64(1) + } else { + value = float64(0) + } + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case int, int8, int16, int32, int64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%d", value) + case reflect.Int64: + // types match + case reflect.Float64: + value = float64(reflect.ValueOf(sourceType).Int()) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case uint, uint8, uint16, uint32, uint64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%d", value) + case reflect.Int64: + // types match + case reflect.Float64: + value = float64(reflect.ValueOf(sourceType).Uint()) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case float32, float64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%f", value) + case reflect.Int64: + value = int64(reflect.ValueOf(sourceType).Float()) + case reflect.Float64: + // types match + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case string: + switch typ.Kind() { + case reflect.String: + // types match + case reflect.Int64: + value, _ = strconv.ParseInt(value.(string), 10, 64) + case reflect.Float64: + value, _ = strconv.ParseFloat(value.(string), 64) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + default: + panic(fmt.Sprintf("unhandled source type %T", sourceType)) + } + return reflect.ValueOf(value) +} diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index d5aaa7a7bfa38..6f8abbda6be0c 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -24,11 +24,11 @@ func (t *testClient) Info() *redis.StringCmd { return nil } -func (t *testClient) Do(returnType string, args ...interface{}) (interface{}, error) { +func (t *testClient) Do(_ string, _ ...interface{}) (interface{}, error) { return 2, nil } -func TestRedisConnect(t *testing.T) { +func TestRedisConnectIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -83,62 +83,115 @@ func TestRedis_ParseMetrics(t *testing.T) { tags = map[string]string{"host": "redis.net", "replication_role": 
"master"} fields := map[string]interface{}{ - "uptime": int64(238), - "lru_clock": int64(2364819), - "clients": int64(1), - "client_longest_output_list": int64(0), - "client_biggest_input_buf": int64(0), - "blocked_clients": int64(0), - "used_memory": int64(1003936), - "used_memory_rss": int64(811008), - "used_memory_peak": int64(1003936), - "used_memory_lua": int64(33792), - "used_memory_peak_perc": float64(93.58), - "used_memory_dataset_perc": float64(20.27), - "mem_fragmentation_ratio": float64(0.81), - "loading": int64(0), - "rdb_changes_since_last_save": int64(0), - "rdb_bgsave_in_progress": int64(0), - "rdb_last_save_time": int64(1428427941), - "rdb_last_bgsave_status": "ok", - "rdb_last_bgsave_time_sec": int64(-1), - "rdb_current_bgsave_time_sec": int64(-1), - "aof_enabled": int64(0), - "aof_rewrite_in_progress": int64(0), - "aof_rewrite_scheduled": int64(0), - "aof_last_rewrite_time_sec": int64(-1), - "aof_current_rewrite_time_sec": int64(-1), - "aof_last_bgrewrite_status": "ok", - "aof_last_write_status": "ok", - "total_connections_received": int64(2), - "total_commands_processed": int64(1), - "instantaneous_ops_per_sec": int64(0), - "instantaneous_input_kbps": float64(876.16), - "instantaneous_output_kbps": float64(3010.23), - "rejected_connections": int64(0), - "sync_full": int64(0), - "sync_partial_ok": int64(0), - "sync_partial_err": int64(0), - "expired_keys": int64(0), - "evicted_keys": int64(0), - "keyspace_hits": int64(1), - "keyspace_misses": int64(1), - "pubsub_channels": int64(0), - "pubsub_patterns": int64(0), - "latest_fork_usec": int64(0), - "connected_slaves": int64(2), - "master_repl_offset": int64(0), - "repl_backlog_active": int64(0), - "repl_backlog_size": int64(1048576), - "repl_backlog_first_byte_offset": int64(0), - "repl_backlog_histlen": int64(0), - "second_repl_offset": int64(-1), - "used_cpu_sys": float64(0.14), - "used_cpu_user": float64(0.05), - "used_cpu_sys_children": float64(0.00), - "used_cpu_user_children": float64(0.00), - "keyspace_hitrate": float64(0.50), - "redis_version": "2.8.9", + "uptime": int64(238), + "lru_clock": int64(2364819), + "clients": int64(1), + "client_longest_output_list": int64(0), + "client_biggest_input_buf": int64(0), + "blocked_clients": int64(0), + "used_memory": int64(1003936), + "used_memory_rss": int64(811008), + "used_memory_peak": int64(1003936), + "used_memory_lua": int64(33792), + "used_memory_peak_perc": float64(93.58), + "used_memory_dataset_perc": float64(20.27), + "mem_fragmentation_ratio": float64(0.81), + "loading": int64(0), + "rdb_changes_since_last_save": int64(0), + "rdb_bgsave_in_progress": int64(0), + "rdb_last_save_time": int64(1428427941), + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": int64(-1), + "rdb_current_bgsave_time_sec": int64(-1), + "aof_enabled": int64(0), + "aof_rewrite_in_progress": int64(0), + "aof_rewrite_scheduled": int64(0), + "aof_last_rewrite_time_sec": int64(-1), + "aof_current_rewrite_time_sec": int64(-1), + "aof_last_bgrewrite_status": "ok", + "aof_last_write_status": "ok", + "total_connections_received": int64(2), + "total_commands_processed": int64(1), + "instantaneous_ops_per_sec": int64(0), + "instantaneous_input_kbps": float64(876.16), + "instantaneous_output_kbps": float64(3010.23), + "rejected_connections": int64(0), + "sync_full": int64(0), + "sync_partial_ok": int64(0), + "sync_partial_err": int64(0), + "expired_keys": int64(0), + "evicted_keys": int64(0), + "keyspace_hits": int64(1), + "keyspace_misses": int64(1), + "pubsub_channels": int64(0), + 
"pubsub_patterns": int64(0), + "latest_fork_usec": int64(0), + "connected_slaves": int64(2), + "master_repl_offset": int64(0), + "repl_backlog_active": int64(0), + "repl_backlog_size": int64(1048576), + "repl_backlog_first_byte_offset": int64(0), + "repl_backlog_histlen": int64(0), + "second_repl_offset": int64(-1), + "used_cpu_sys": float64(0.14), + "used_cpu_user": float64(0.05), + "used_cpu_sys_children": float64(0.00), + "used_cpu_user_children": float64(0.00), + "keyspace_hitrate": float64(0.50), + "redis_version": "6.0.9", + "active_defrag_hits": int64(0), + "active_defrag_key_hits": int64(0), + "active_defrag_key_misses": int64(0), + "active_defrag_misses": int64(0), + "active_defrag_running": int64(0), + "allocator_active": int64(1022976), + "allocator_allocated": int64(1019632), + "allocator_frag_bytes": float64(3344), + "allocator_frag_ratio": float64(1.00), + "allocator_resident": int64(1022976), + "allocator_rss_bytes": int64(0), + "allocator_rss_ratio": float64(1.00), + "aof_last_cow_size": int64(0), + "client_recent_max_input_buffer": int64(16), + "client_recent_max_output_buffer": int64(0), + "clients_in_timeout_table": int64(0), + "cluster_enabled": int64(0), + "expire_cycle_cpu_milliseconds": int64(669), + "expired_stale_perc": float64(0.00), + "expired_time_cap_reached_count": int64(0), + "io_threaded_reads_processed": int64(0), + "io_threaded_writes_processed": int64(0), + "total_reads_processed": int64(31), + "total_writes_processed": int64(17), + "lazyfree_pending_objects": int64(0), + "maxmemory": int64(0), + "maxmemory_policy": string("noeviction"), + "mem_aof_buffer": int64(0), + "mem_clients_normal": int64(17440), + "mem_clients_slaves": int64(0), + "mem_fragmentation_bytes": int64(41232), + "mem_not_counted_for_evict": int64(0), + "mem_replication_backlog": int64(0), + "rss_overhead_bytes": int64(37888), + "rss_overhead_ratio": float64(1.04), + "total_system_memory": int64(17179869184), + "used_memory_dataset": int64(47088), + "used_memory_overhead": int64(1019152), + "used_memory_scripts": int64(0), + "used_memory_startup": int64(1001712), + "migrate_cached_sockets": int64(0), + "module_fork_in_progress": int64(0), + "module_fork_last_cow_size": int64(0), + "number_of_cached_scripts": int64(0), + "rdb_last_cow_size": int64(0), + "slave_expires_tracked_keys": int64(0), + "unexpected_error_replies": int64(0), + "total_net_input_bytes": int64(381), + "total_net_output_bytes": int64(71521), + "tracking_clients": int64(0), + "tracking_total_items": int64(0), + "tracking_total_keys": int64(0), + "tracking_total_prefixes": int64(0), } // We have to test rdb_last_save_time_offset manually because the value is based on the time when gathered @@ -210,26 +263,110 @@ func TestRedis_ParseMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags) } +func TestRedis_ParseFloatOnInts(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "mem_fragmentation_ratio:0.81", "mem_fragmentation_ratio:1", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["mem_fragmentation_ratio"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + fragRatio, ok := m.Fields["mem_fragmentation_ratio"] + require.True(t, ok) + require.IsType(t, float64(0.0), fragRatio) +} + +func TestRedis_ParseIntOnFloats(t 
*testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "clients_in_timeout_table:0", "clients_in_timeout_table:0.0", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["clients_in_timeout_table"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + clientsInTimeout, ok := m.Fields["clients_in_timeout_table"] + require.True(t, ok) + require.IsType(t, int64(0), clientsInTimeout) +} + +func TestRedis_ParseStringOnInts(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "maxmemory_policy:noeviction", "maxmemory_policy:1", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["maxmemory_policy"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + maxmemoryPolicy, ok := m.Fields["maxmemory_policy"] + require.True(t, ok) + require.IsType(t, string(""), maxmemoryPolicy) +} + +func TestRedis_ParseIntOnString(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "clients_in_timeout_table:0", `clients_in_timeout_table:""`, 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["clients_in_timeout_table"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + clientsInTimeout, ok := m.Fields["clients_in_timeout_table"] + require.True(t, ok) + require.IsType(t, int64(0), clientsInTimeout) +} + const testOutput = `# Server -redis_version:2.8.9 +redis_version:6.0.9 redis_git_sha1:00000000 redis_git_dirty:0 -redis_build_id:9ccc8119ea98f6e1 +redis_build_id:26c3229b35eb3beb redis_mode:standalone -os:Darwin 14.1.0 x86_64 +os:Darwin 19.6.0 x86_64 arch_bits:64 multiplexing_api:kqueue +atomicvar_api:atomic-builtin gcc_version:4.2.1 -process_id:40235 -run_id:37d020620aadf0627282c0f3401405d774a82664 +process_id:46677 +run_id:5d6bf38087b23e48f1a59b7aca52e2b55438b02f tcp_port:6379 uptime_in_seconds:238 uptime_in_days:0 hz:10 +configured_hz:10 lru_clock:2364819 +executable:/usr/local/opt/redis/bin/redis-server config_file:/usr/local/etc/redis.conf +io_threads_active:0 # Clients +client_recent_max_input_buffer:16 +client_recent_max_output_buffer:0 +tracking_clients:0 +clients_in_timeout_table:0 connected_clients:1 client_longest_output_list:0 client_biggest_input_buf:0 @@ -239,13 +376,43 @@ blocked_clients:0 used_memory:1003936 used_memory_human:980.41K used_memory_rss:811008 +used_memory_rss_human:1.01M used_memory_peak:1003936 used_memory_peak_human:980.41K +used_memory_peak_perc:93.58% +used_memory_overhead:1019152 +used_memory_startup:1001712 +used_memory_dataset:47088 +used_memory_dataset_perc:20.27% +allocator_allocated:1019632 +allocator_active:1022976 +allocator_resident:1022976 +total_system_memory:17179869184 +total_system_memory_human:16.00G used_memory_lua:33792 +used_memory_lua_human:37.00K +used_memory_scripts:0 +used_memory_scripts_human:0B +number_of_cached_scripts:0 +maxmemory:0 +maxmemory_human:0B +maxmemory_policy:noeviction +allocator_frag_ratio:1.00 +allocator_frag_bytes:3344
+allocator_rss_ratio:1.00 +allocator_rss_bytes:0 +rss_overhead_ratio:1.04 +rss_overhead_bytes:37888 mem_fragmentation_ratio:0.81 +mem_fragmentation_bytes:41232 +mem_not_counted_for_evict:0 +mem_replication_backlog:0 +mem_clients_slaves:0 +mem_clients_normal:17440 +mem_aof_buffer:0 mem_allocator:libc -used_memory_peak_perc:93.58% -used_memory_dataset_perc:20.27% +active_defrag_running:0 +lazyfree_pending_objects:0 # Persistence loading:0 @@ -255,6 +422,7 @@ rdb_last_save_time:1428427941 rdb_last_bgsave_status:ok rdb_last_bgsave_time_sec:-1 rdb_current_bgsave_time_sec:-1 +rdb_last_cow_size:0 aof_enabled:0 aof_rewrite_in_progress:0 aof_rewrite_scheduled:0 @@ -262,11 +430,16 @@ aof_last_rewrite_time_sec:-1 aof_current_rewrite_time_sec:-1 aof_last_bgrewrite_status:ok aof_last_write_status:ok +aof_last_cow_size:0 +module_fork_in_progress:0 +module_fork_last_cow_size:0 # Stats total_connections_received:2 total_commands_processed:1 instantaneous_ops_per_sec:0 +total_net_input_bytes:381 +total_net_output_bytes:71521 instantaneous_input_kbps:876.16 instantaneous_output_kbps:3010.23 rejected_connections:0 @@ -274,12 +447,29 @@ sync_full:0 sync_partial_ok:0 sync_partial_err:0 expired_keys:0 +expired_stale_perc:0.00 +expired_time_cap_reached_count:0 +expire_cycle_cpu_milliseconds:669 evicted_keys:0 keyspace_hits:1 keyspace_misses:1 pubsub_channels:0 pubsub_patterns:0 latest_fork_usec:0 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 +tracking_total_keys:0 +tracking_total_items:0 +tracking_total_prefixes:0 +unexpected_error_replies:0 +total_reads_processed:31 +total_writes_processed:17 +io_threaded_reads_processed:0 +io_threaded_writes_processed:0 # Replication role:master @@ -301,6 +491,9 @@ used_cpu_user:0.05 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 +# Cluster +cluster_enabled:0 + # Commandstats cmdstat_set:calls=261265,usec=1634157,usec_per_call=6.25 cmdstat_command:calls=1,usec=990,usec_per_call=990.00 @@ -308,5 +501,4 @@ cmdstat_command:calls=1,usec=990,usec_per_call=990.00 # Keyspace db0:keys=2,expires=0,avg_ttl=0 -(error) ERR unknown command 'eof' -` +(error) ERR unknown command 'eof'` diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index dc6b03620b153..a0108acf64df5 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -40,14 +40,13 @@ func (r *RethinkDB) Description() string { return "Read metrics from one or many RethinkDB servers" } -var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}} +var localhost = &Server{URL: &url.URL{Host: "127.0.0.1:28015"}} // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { if len(r.Servers) == 0 { - r.gatherServer(localhost, acc) - return nil + return r.gatherServer(localhost, acc) } var wg sync.WaitGroup @@ -55,17 +54,17 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { for _, serv := range r.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e.
"10.0.0.1:10000") u.Host = serv } wg.Add(1) - go func(serv string) { + go func() { defer wg.Done() - acc.AddError(r.gatherServer(&Server{Url: u}, acc)) - }(serv) + acc.AddError(r.gatherServer(&Server{URL: u}, acc)) + }() } wg.Wait() @@ -76,20 +75,20 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, DiscoverHosts: false, } - if server.Url.User != nil { - pwd, set := server.Url.User.Password() + if server.URL.User != nil { + pwd, set := server.URL.User.Password() if set && pwd != "" { connectOpts.AuthKey = pwd connectOpts.HandshakeVersion = gorethink.HandshakeV0_4 } } - if server.Url.Scheme == "rethinkdb2" && server.Url.User != nil { - pwd, set := server.Url.User.Password() + if server.URL.Scheme == "rethinkdb2" && server.URL.User != nil { + pwd, set := server.URL.User.Password() if set && pwd != "" { - connectOpts.Username = server.Url.User.Username() + connectOpts.Username = server.URL.User.Username() connectOpts.Password = pwd connectOpts.HandshakeVersion = gorethink.HandshakeV1_0 } @@ -97,7 +96,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error server.session, err = gorethink.Connect(connectOpts) if err != nil { - return fmt.Errorf("Unable to connect to RethinkDB, %s\n", err.Error()) + return fmt.Errorf("unable to connect to RethinkDB, %s", err.Error()) } defer server.session.Close() diff --git a/plugins/inputs/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go index ca4ac75523455..159f6af9d992b 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data.go @@ -8,7 +8,7 @@ import ( ) type serverStatus struct { - Id string `gorethink:"id"` + ID string `gorethink:"id"` Network struct { Addresses []Address `gorethink:"canonical_addresses"` Hostname string `gorethink:"hostname"` @@ -41,7 +41,7 @@ type Engine struct { } type tableStatus struct { - Id string `gorethink:"id"` + ID string `gorethink:"id"` DB string `gorethink:"db"` Name string `gorethink:"name"` } diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go index ce1d963b973fc..a0c5e4ba8ae57 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -59,14 +59,14 @@ func TestAddEngineStatsPartial(t *testing.T) { "written_docs_per_sec", } - missing_keys := []string{ + missingKeys := []string{ "total_queries", "total_reads", "total_writes", } engine.AddEngineStats(keys, &acc, tags) - for _, metric := range missing_keys { + for _, metric := range missingKeys { assert.False(t, acc.HasInt64Field("rethinkdb", metric)) } } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index c10605aa6d83e..ffb63e64106e2 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -15,31 +15,30 @@ import ( ) type Server struct { - Url *url.URL + URL *url.URL session *gorethink.Session serverStatus serverStatus } func (s *Server) gatherData(acc telegraf.Accumulator) error { if err := s.getServerStatus(); err != nil { - return fmt.Errorf("Failed to get server_status, %s\n", err) + return fmt.Errorf("failed to get server_status, %s", err) } if err := s.validateVersion(); err != nil { - return fmt.Errorf("Failed version validation, %s\n", 
err.Error()) + return fmt.Errorf("failed version validation, %s", err.Error()) } if err := s.addClusterStats(acc); err != nil { - fmt.Printf("error adding cluster stats, %s\n", err.Error()) - return fmt.Errorf("Error adding cluster stats, %s\n", err.Error()) + return fmt.Errorf("error adding cluster stats, %s", err.Error()) } if err := s.addMemberStats(acc); err != nil { - return fmt.Errorf("Error adding member stats, %s\n", err.Error()) + return fmt.Errorf("error adding member stats, %s", err.Error()) } if err := s.addTableStats(acc); err != nil { - return fmt.Errorf("Error adding table stats, %s\n", err.Error()) + return fmt.Errorf("error adding table stats, %s", err.Error()) } return nil @@ -58,7 +57,7 @@ func (s *Server) validateVersion() error { majorVersion, err := strconv.Atoi(strings.Split(versionString, "")[0]) if err != nil || majorVersion < 2 { - return fmt.Errorf("unsupported major version %s\n", versionString) + return fmt.Errorf("unsupported major version %s", versionString) } return nil } @@ -78,9 +77,9 @@ func (s *Server) getServerStatus() error { if err != nil { return errors.New("could not parse server_status results") } - host, port, err := net.SplitHostPort(s.Url.Host) + host, port, err := net.SplitHostPort(s.URL.Host) if err != nil { - return fmt.Errorf("unable to determine provided hostname from %s\n", s.Url.Host) + return fmt.Errorf("unable to determine provided hostname from %s", s.URL.Host) } driverPort, _ := strconv.Atoi(port) for _, ss := range serverStatuses { @@ -92,12 +91,12 @@ func (s *Server) getServerStatus() error { } } - return fmt.Errorf("unable to determine host id from server_status with %s", s.Url.Host) + return fmt.Errorf("unable to determine host id from server_status with %s", s.URL.Host) } func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["rethinkdb_host"] = s.Url.Host + tags["rethinkdb_host"] = s.URL.Host tags["rethinkdb_hostname"] = s.serverStatus.Network.Hostname return tags } @@ -113,12 +112,12 @@ var ClusterTracking = []string{ func (s *Server) addClusterStats(acc telegraf.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"cluster"}).Run(s.session) if err != nil { - return fmt.Errorf("cluster stats query error, %s\n", err.Error()) + return fmt.Errorf("cluster stats query error, %s", err.Error()) } defer cursor.Close() var clusterStats stats if err := cursor.One(&clusterStats); err != nil { - return fmt.Errorf("failure to parse cluster stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse cluster stats, %s", err.Error()) } tags := s.getDefaultTags() @@ -139,14 +138,14 @@ var MemberTracking = []string{ } func (s *Server) addMemberStats(acc telegraf.Accumulator) error { - cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session) + cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.ID}).Run(s.session) if err != nil { - return fmt.Errorf("member stats query error, %s\n", err.Error()) + return fmt.Errorf("member stats query error, %s", err.Error()) } defer cursor.Close() var memberStats stats if err := cursor.One(&memberStats); err != nil { - return fmt.Errorf("failure to parse member stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse member stats, %s", err.Error()) } tags := s.getDefaultTags() @@ -165,7 +164,7 @@ var TableTracking = []string{ func (s *Server) addTableStats(acc telegraf.Accumulator) error { tablesCursor, err := 
gorethink.DB("rethinkdb").Table("table_status").Run(s.session) if err != nil { - return fmt.Errorf("table stats query error, %s\n", err.Error()) + return fmt.Errorf("table stats query error, %s", err.Error()) } defer tablesCursor.Close() @@ -176,15 +175,15 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error { } for _, table := range tables { cursor, err := gorethink.DB("rethinkdb").Table("stats"). - Get([]string{"table_server", table.Id, s.serverStatus.Id}). + Get([]string{"table_server", table.ID, s.serverStatus.ID}). Run(s.session) if err != nil { - return fmt.Errorf("table stats query error, %s\n", err.Error()) + return fmt.Errorf("table stats query error, %s", err.Error()) } defer cursor.Close() var ts tableStats if err := cursor.One(&ts); err != nil { - return fmt.Errorf("failure to parse table stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse table stats, %s", err.Error()) } tags := s.getDefaultTags() diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 82ff292804a8c..0119131900b61 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go index fa2cc92f2b06c..651042ab13783 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb @@ -28,18 +29,18 @@ func init() { func testSetup(m *testing.M) { var err error - server = &Server{Url: &url.URL{Host: connect_url}} + server = &Server{URL: &url.URL{Host: connect_url}} if authKey { server.session, _ = gorethink.Connect(gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, AuthKey: authKey, HandshakeVersion: gorethink.HandshakeV0_4, DiscoverHosts: false, }) } else { server.session, _ = gorethink.Connect(gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, Username: username, Password: password, HandshakeVersion: gorethink.HandshakeV1_0, diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index 19f6222890360..6a1a98e4586a1 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -21,19 +21,19 @@ type Riak struct { // NewRiak return a new instance of Riak with a default http client func NewRiak() *Riak { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second} client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return &Riak{client: client} } // Type riakStats represents the data that is received from Riak type riakStats struct { - CpuAvg1 int64 `json:"cpu_avg1"` - CpuAvg15 int64 `json:"cpu_avg15"` - CpuAvg5 int64 `json:"cpu_avg5"` + CPUAvg1 int64 `json:"cpu_avg1"` + CPUAvg15 int64 `json:"cpu_avg15"` + CPUAvg5 int64 `json:"cpu_avg5"` MemoryCode int64 `json:"memory_code"` MemoryEts int64 `json:"memory_ets"` MemoryProcesses int64 `json:"memory_processes"` @@ -144,9 +144,9 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error { // Build a map of field values fields := map[string]interface{}{ - "cpu_avg1": stats.CpuAvg1, - "cpu_avg15": stats.CpuAvg15, - "cpu_avg5": stats.CpuAvg5, + "cpu_avg1": stats.CPUAvg1, + "cpu_avg15": stats.CPUAvg15, + 
"cpu_avg5": stats.CPUAvg5, "memory_code": stats.MemoryCode, "memory_ets": stats.MemoryEts, "memory_processes": stats.MemoryProcesses, diff --git a/plugins/inputs/riak/riak_test.go b/plugins/inputs/riak/riak_test.go index 09f9a961f4d76..90688b17827b0 100644 --- a/plugins/inputs/riak/riak_test.go +++ b/plugins/inputs/riak/riak_test.go @@ -15,7 +15,8 @@ func TestRiak(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() @@ -31,8 +32,7 @@ func TestRiak(t *testing.T) { acc := &testutil.Accumulator{} // Gather data from the test server - err = riak.Gather(acc) - require.NoError(t, err) + require.NoError(t, riak.Gather(acc)) // Expect the correct values for all known keys expectFields := map[string]interface{}{ diff --git a/plugins/inputs/riemann_listener/README.md b/plugins/inputs/riemann_listener/README.md new file mode 100644 index 0000000000000..54e70be6ecb71 --- /dev/null +++ b/plugins/inputs/riemann_listener/README.md @@ -0,0 +1,42 @@ +# Riemann Listener Input Plugin + +The Riemann Listener is a simple input plugin that listens for messages from +client that use riemann clients using riemann-protobuff format. + + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.rimann_listener]] + ## URL to listen on + ## Default is "tcp://:5555" + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + + ## Maximum number of concurrent connections. + ## 0 (default) is unlimited. + # max_connections = 1024 + ## Read timeout. + ## 0 (default) is unlimited. + # read_timeout = "30s" + ## Optional TLS configuration. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Maximum socket buffer size (in bytes when no unit specified). + # read_buffer_size = "64KiB" + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" +``` +Just like Riemann the default port is 5555. This can be configured, refer configuration above. + +Riemann `Service` is mapped as `measurement`. `metric` and `TTL` are converted into field values. +As Riemann tags as simply an array, they are converted into the `influx_line` format key-value, where both key and value are the tags. 
diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go new file mode 100644 index 0000000000000..03b28ad2cb07f --- /dev/null +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -0,0 +1,395 @@ +package riemann_listener + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "log" + "net" + "os" + "os/signal" + "strings" + "sync" + "time" + + riemanngo "github.com/riemann/riemann-go-client" + riemangoProto "github.com/riemann/riemann-go-client/proto" + "google.golang.org/protobuf/proto" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type RiemannSocketListener struct { + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + ReadTimeout *config.Duration `toml:"read_timeout"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + tlsint.ServerConfig + + wg sync.WaitGroup + + Log telegraf.Logger + + telegraf.Accumulator +} +type setReadBufferer interface { + SetReadBuffer(bytes int) error +} + +type riemannListener struct { + net.Listener + *RiemannSocketListener + + sockType string + + connections map[string]net.Conn + connectionsMtx sync.Mutex +} + +func (rsl *riemannListener) listen(ctx context.Context) { + rsl.connections = map[string]net.Conn{} + + wg := sync.WaitGroup{} + + select { + case <-ctx.Done(): + rsl.closeAllConnections() + wg.Wait() + return + default: + for { + c, err := rsl.Accept() + if err != nil { + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + rsl.Log.Error(err.Error()) + } + break + } + + if rsl.ReadBufferSize > 0 { + if srb, ok := c.(setReadBufferer); ok { + if err := srb.SetReadBuffer(int(rsl.ReadBufferSize)); err != nil { + rsl.Log.Warnf("Setting read buffer failed: %v", err) + } + } else { + rsl.Log.Warnf("Unable to set read buffer on a %s socket", rsl.sockType) + } + } + + rsl.connectionsMtx.Lock() + if rsl.MaxConnections > 0 && len(rsl.connections) >= rsl.MaxConnections { + rsl.connectionsMtx.Unlock() + if err := c.Close(); err != nil { + rsl.Log.Warnf("Closing the connection failed: %v", err) + } + continue + } + rsl.connections[c.RemoteAddr().String()] = c + rsl.connectionsMtx.Unlock() + + if err := rsl.setKeepAlive(c); err != nil { + rsl.Log.Errorf("Unable to configure keep alive %q: %s", rsl.ServiceAddress, err.Error()) + } + + wg.Add(1) + go func() { + defer wg.Done() + rsl.read(c) + }() + } + rsl.closeAllConnections() + wg.Wait() + } +} + +func (rsl *riemannListener) closeAllConnections() { + rsl.connectionsMtx.Lock() + for _, c := range rsl.connections { + if err := c.Close(); err != nil { + rsl.Log.Warnf("Closing the connection failed: %v", err.Error()) + } + } + rsl.connectionsMtx.Unlock() +} + +func (rsl *riemannListener) setKeepAlive(c net.Conn) error { + if rsl.KeepAlivePeriod == nil { + return nil + } + tcpc, ok := c.(*net.TCPConn) + if !ok { + return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(rsl.ServiceAddress, "://", 2)[0]) + } + if *rsl.KeepAlivePeriod == 0 { + return tcpc.SetKeepAlive(false) + } + if err := tcpc.SetKeepAlive(true); err != nil { + return err + } + return tcpc.SetKeepAlivePeriod(time.Duration(*rsl.KeepAlivePeriod)) +} + +func (rsl *riemannListener) 
removeConnection(c net.Conn) {
+	rsl.connectionsMtx.Lock()
+	delete(rsl.connections, c.RemoteAddr().String())
+	rsl.connectionsMtx.Unlock()
+}
+
+// Utilities
+
+/*
+readMessages reads Riemann messages in binary format from the TCP
+connection. The size of the byte array p depends on the size of the
+Riemann message as sent by the client.
+*/
+func readMessages(r io.Reader, p []byte) error {
+	for len(p) > 0 {
+		n, err := r.Read(p)
+		p = p[n:]
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// checkError logs an error from a best-effort response write for which
+// nothing else can be done.
+func checkError(err error) {
+	if err != nil {
+		log.Println(err.Error())
+	}
+}
+
+func (rsl *riemannListener) read(conn net.Conn) {
+	defer rsl.removeConnection(conn)
+	defer conn.Close()
+	var err error
+
+	for {
+		if rsl.ReadTimeout != nil && *rsl.ReadTimeout > 0 {
+			if err := conn.SetDeadline(time.Now().Add(time.Duration(*rsl.ReadTimeout))); err != nil {
+				rsl.Log.Warnf("Setting deadline failed: %v", err)
+			}
+		}
+
+		messagePb := &riemangoProto.Msg{}
+		var header uint32
+		// First obtain the size of the riemann event from the client and acknowledge
+		if err = binary.Read(conn, binary.BigEndian, &header); err != nil {
+			if err.Error() != "EOF" {
+				rsl.Log.Debugf("Failed to read header")
+				riemannReturnErrorResponse(conn, err.Error())
+				return
+			}
+			return
+		}
+		data := make([]byte, header)
+
+		if err = readMessages(conn, data); err != nil {
+			rsl.Log.Debugf("Failed to read body: %s", err.Error())
+			riemannReturnErrorResponse(conn, "Failed to read body")
+			return
+		}
+		if err = proto.Unmarshal(data, messagePb); err != nil {
+			rsl.Log.Debugf("Failed to unmarshal: %s", err.Error())
+			riemannReturnErrorResponse(conn, "Failed to unmarshal")
+			return
+		}
+		riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events)
+
+		for _, m := range riemannEvents {
+			if m.Service == "" {
+				riemannReturnErrorResponse(conn, "No Service Name")
+				return
+			}
+			tags := make(map[string]string)
+			fieldValues := map[string]interface{}{}
+			for _, tag := range m.Tags {
+				tags[strings.ReplaceAll(tag, " ", "_")] = tag
+			}
+			tags["Host"] = m.Host
+			tags["Description"] = m.Description
+			tags["State"] = m.State
+			fieldValues["Metric"] = m.Metric
+			fieldValues["TTL"] = m.TTL.Seconds()
+			singleMetric := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped)
+			rsl.AddMetric(singleMetric)
+		}
+		riemannReturnResponse(conn)
+	}
+}
+
+func riemannReturnResponse(conn net.Conn) {
+	t := true
+	message := new(riemangoProto.Msg)
+	message.Ok = &t
+	returnData, err := proto.Marshal(message)
+	if err != nil {
+		checkError(err)
+		return
+	}
+	b := new(bytes.Buffer)
+	if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil {
+		checkError(err)
+	}
+	// send the msg length
+	if _, err = conn.Write(b.Bytes()); err != nil {
+		checkError(err)
+	}
+	if _, err = conn.Write(returnData); err != nil {
+		checkError(err)
+	}
+}
+
+func riemannReturnErrorResponse(conn net.Conn, errorMessage string) {
+	t := false
+	message := new(riemangoProto.Msg)
+	message.Ok = &t
+	message.Error = &errorMessage
+	returnData, err := proto.Marshal(message)
+	if err != nil {
+		checkError(err)
+		return
+	}
+	b := new(bytes.Buffer)
+	if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil {
+		checkError(err)
+	}
+	// send the msg length
+	if _, err = conn.Write(b.Bytes()); err != nil {
+		checkError(err)
+	}
+	if _, err = conn.Write(returnData); err != nil {
+		checkError(err)
+	}
+}
+
+func (rsl *RiemannSocketListener) Description() string {
"Riemann protobuff listener." +} + +func (rsl *RiemannSocketListener) SampleConfig() string { + return ` + ## URL to listen on. + ## Default is "tcp://:5555" + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + + ## Maximum number of concurrent connections. + ## 0 (default) is unlimited. + # max_connections = 1024 + ## Read timeout. + ## 0 (default) is unlimited. + # read_timeout = "30s" + ## Optional TLS configuration. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Maximum socket buffer size (in bytes when no unit specified). + # read_buffer_size = "64KiB" + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" +` +} + +func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { + ctx, cancelFunc := context.WithCancel(context.Background()) + go processOsSignals(cancelFunc) + rsl.Accumulator = acc + if rsl.ServiceAddress == "" { + rsl.Log.Warnf("Using default service_address tcp://:5555") + rsl.ServiceAddress = "tcp://:5555" + } + spl := strings.SplitN(rsl.ServiceAddress, "://", 2) + if len(spl) != 2 { + return fmt.Errorf("invalid service address: %s", rsl.ServiceAddress) + } + + protocol := spl[0] + addr := spl[1] + + switch protocol { + case "tcp", "tcp4", "tcp6": + tlsCfg, err := rsl.ServerConfig.TLSConfig() + if err != nil { + return err + } + + var l net.Listener + if tlsCfg == nil { + l, err = net.Listen(protocol, addr) + } else { + l, err = tls.Listen(protocol, addr, tlsCfg) + } + if err != nil { + return err + } + + rsl.Log.Infof("Listening on %s://%s", protocol, l.Addr()) + + rsl := &riemannListener{ + Listener: l, + RiemannSocketListener: rsl, + sockType: spl[0], + } + + rsl.wg = sync.WaitGroup{} + rsl.wg.Add(1) + go func() { + defer rsl.wg.Done() + rsl.listen(ctx) + }() + default: + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, rsl.ServiceAddress) + } + + return nil +} + +// Handle cancellations from the process +func processOsSignals(cancelFunc context.CancelFunc) { + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + for { + sig := <-signalChan + switch sig { + case os.Interrupt: + log.Println("Signal SIGINT is received, probably due to `Ctrl-C`, exiting ...") + cancelFunc() + return + } + } +} + +func (rsl *RiemannSocketListener) Stop() { + rsl.wg.Done() + rsl.wg.Wait() +} + +func newRiemannSocketListener() *RiemannSocketListener { + return &RiemannSocketListener{} +} + +func init() { + inputs.Add("riemann_listener", func() telegraf.Input { return newRiemannSocketListener() }) +} diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go new file mode 100644 index 0000000000000..7a995fc475cb7 --- /dev/null +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -0,0 +1,54 @@ +package riemann_listener + +import ( + "log" + "testing" + "time" + + riemanngo "github.com/riemann/riemann-go-client" + "github.com/stretchr/testify/require" + "gotest.tools/assert" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +func TestSocketListener_tcp(t 
diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go
new file mode 100644
index 0000000000000..7a995fc475cb7
--- /dev/null
+++ b/plugins/inputs/riemann_listener/riemann_listener_test.go
@@ -0,0 +1,54 @@
+package riemann_listener
+
+import (
+	"log"
+	"testing"
+	"time"
+
+	riemanngo "github.com/riemann/riemann-go-client"
+	"github.com/stretchr/testify/require"
+	"gotest.tools/assert"
+
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/testutil"
+)
+
+func TestSocketListener_tcp(t *testing.T) {
+	log.Println("Entering")
+
+	sl := newRiemannSocketListener()
+	sl.Log = testutil.Logger{}
+	sl.ServiceAddress = "tcp://127.0.0.1:5555"
+	sl.ReadBufferSize = config.Size(1024)
+
+	acc := &testutil.Accumulator{}
+	err := sl.Start(acc)
+	require.NoError(t, err)
+	defer sl.Stop()
+
+	testStats(t)
+	testMissingService(t)
+}
+func testStats(t *testing.T) {
+	c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second)
+	err := c.Connect()
+	if err != nil {
+		panic(err)
+	}
+	defer c.Close()
+
+	result, err := riemanngo.SendEvent(c, &riemanngo.Event{
+		Service: "hello",
+	})
+	require.NoError(t, err)
+	assert.Equal(t, result.GetOk(), true)
+}
+func testMissingService(t *testing.T) {
+	c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second)
+	err := c.Connect()
+	if err != nil {
+		panic(err)
+	}
+	defer c.Close()
+
+	result, err := riemanngo.SendEvent(c, &riemanngo.Event{})
+	assert.Equal(t, result.GetOk(), false)
+}
diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go
index b66266d3f17d2..f7c321d7ae978 100644
--- a/plugins/inputs/salesforce/salesforce.go
+++ b/plugins/inputs/salesforce/salesforce.go
@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
@@ -62,11 +61,11 @@ const defaultEnvironment = "production"
 // returns a new Salesforce plugin instance
 func NewSalesforce() *Salesforce {
 	tr := &http.Transport{
-		ResponseHeaderTimeout: time.Duration(5 * time.Second),
+		ResponseHeaderTimeout: 5 * time.Second,
 	}
 	client := &http.Client{
 		Transport: tr,
-		Timeout:   time.Duration(10 * time.Second),
+		Timeout:   10 * time.Second,
 	}
 	return &Salesforce{
 		client:      client,
@@ -147,7 +146,7 @@ func (s *Salesforce) fetchLimits() (limits, error) {
 	}
 
 	if resp.StatusCode != http.StatusOK {
-		return l, fmt.Errorf("Salesforce responded with unexpected status code %d", resp.StatusCode)
+		return l, fmt.Errorf("salesforce responded with unexpected status code %d", resp.StatusCode)
 	}
 
 	l = limits{}
@@ -203,11 +202,11 @@ func (s *Salesforce) login() error {
 	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
 		// ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
- body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/salesforce/salesforce_test.go b/plugins/inputs/salesforce/salesforce_test.go index 288cc0f40af79..3d26d87dda964 100644 --- a/plugins/inputs/salesforce/salesforce_test.go +++ b/plugins/inputs/salesforce/salesforce_test.go @@ -14,7 +14,7 @@ import ( func Test_Gather(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") - _, _ = w.Write([]byte(testJson)) + _, _ = w.Write([]byte(testJSON)) })) defer fakeServer.Close() @@ -35,7 +35,7 @@ func Test_Gather(t *testing.T) { require.Len(t, m.Tags, 2) } -var testJson = `{ +var testJSON = `{ "ConcurrentAsyncGetReportInstances" : { "Max" : 200, "Remaining" : 200 diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 1df88466be2e9..f2590c105272a 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors @@ -12,6 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -19,12 +21,12 @@ import ( var ( execCommand = exec.Command // execCommand is used to mock commands in tests. numberRegp = regexp.MustCompile("[0-9]+") - defaultTimeout = internal.Duration{Duration: 5 * time.Second} + defaultTimeout = config.Duration(5 * time.Second) ) type Sensors struct { - RemoveNumbers bool `toml:"remove_numbers"` - Timeout internal.Duration `toml:"timeout"` + RemoveNumbers bool `toml:"remove_numbers"` + Timeout config.Duration `toml:"timeout"` path string } @@ -41,7 +43,6 @@ func (*Sensors) SampleConfig() string { ## Timeout is the maximum amount of time that the sensors command can run. 
# timeout = "5s" ` - } func (s *Sensors) Gather(acc telegraf.Accumulator) error { @@ -60,7 +61,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error { fields := map[string]interface{}{} chip := "" cmd := execCommand(s.path, "-A", "-u") - out, err := internal.StdOutputTimeout(cmd, s.Timeout.Duration) + out, err := internal.StdOutputTimeout(cmd, time.Duration(s.Timeout)) if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go index 62a6211598f4e..424e96181b46b 100644 --- a/plugins/inputs/sensors/sensors_notlinux.go +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sensors diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 2a24fa6f9212f..be4cace6eab79 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors @@ -8,6 +9,8 @@ import ( "os/exec" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) @@ -22,10 +25,7 @@ func TestGatherDefault(t *testing.T) { defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := s.Gather(&acc) - if err != nil { - t.Fatal(err) - } + require.NoError(t, s.Gather(&acc)) var tests = []struct { tags map[string]string @@ -163,10 +163,7 @@ func TestGatherNotRemoveNumbers(t *testing.T) { defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := s.Gather(&acc) - if err != nil { - t.Fatal(err) - } + require.NoError(t, s.Gather(&acc)) var tests = []struct { tags map[string]string @@ -306,7 +303,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -373,11 +370,12 @@ Vcore Voltage: cmd, args := args[3], args[4:] if cmd == "sensors" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") os.Exit(1) - } os.Exit(0) } diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md index 66d556e17c694..9e5366706e5df 100644 --- a/plugins/inputs/sflow/README.md +++ b/plugins/inputs/sflow/README.md @@ -14,8 +14,6 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. 
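As a concrete illustration of the metric-filtering suggestion in the list above, here is a hedged sketch of a filtered plugin instance. The tag names are only examples of likely cardinality drivers; check the plugin's measurements section for the tags it actually emits.

```toml
[[inputs.sflow]]
  service_address = "udp://:6343"
  ## Drop the highest-cardinality tags before they are written out
  ## (src_port/dst_port are illustrative candidates).
  tagexclude = ["src_port", "dst_port"]
```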
@@ -113,8 +111,6 @@ This sflow implementation was built from the reference document [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/sflow/metricencoder.go b/plugins/inputs/sflow/metricencoder.go index ffc9d8e023849..2dc1fb122b096 100644 --- a/plugins/inputs/sflow/metricencoder.go +++ b/plugins/inputs/sflow/metricencoder.go @@ -34,10 +34,7 @@ func makeMetrics(p *V5Format) ([]telegraf.Metric, error) { for k, v := range fields { fields2[k] = v } - m, err := metric.New("sflow", tags2, fields2, now) - if err != nil { - return nil, err - } + m := metric.New("sflow", tags2, fields2, now) metrics = append(metrics, m) } } diff --git a/plugins/inputs/sflow/packetdecoder_test.go b/plugins/inputs/sflow/packetdecoder_test.go index f078eaf310e8b..bb318a86a1932 100644 --- a/plugins/inputs/sflow/packetdecoder_test.go +++ b/plugins/inputs/sflow/packetdecoder_test.go @@ -40,7 +40,8 @@ func BenchmarkUDPHeader(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - dc.decodeUDPHeader(octets) + _, err := dc.decodeUDPHeader(octets) + require.NoError(b, err) } } diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index 2e3fbc0cf73f5..3b18409c13a77 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -2,7 +2,6 @@ package sflow import ( "bytes" - "context" "fmt" "io" "net" @@ -11,7 +10,7 @@ import ( "sync" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -32,15 +31,14 @@ const ( ) type SFlow struct { - ServiceAddress string `toml:"service_address"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` + ServiceAddress string `toml:"service_address"` + ReadBufferSize config.Size `toml:"read_buffer_size"` Log telegraf.Logger `toml:"-"` addr net.Addr decoder *PacketDecoder closer io.Closer - cancel context.CancelFunc wg sync.WaitGroup } @@ -85,8 +83,10 @@ func (s *SFlow) Start(acc telegraf.Accumulator) error { s.closer = conn s.addr = conn.LocalAddr() - if s.ReadBufferSize.Size > 0 { - conn.SetReadBuffer(int(s.ReadBufferSize.Size)) + if s.ReadBufferSize > 0 { + if err := conn.SetReadBuffer(int(s.ReadBufferSize)); err != nil { + return err + } } s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String()) @@ -107,6 +107,8 @@ func (s *SFlow) Gather(_ telegraf.Accumulator) error { func (s *SFlow) Stop() { if s.closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.closer.Close() } s.wg.Wait() @@ -131,7 +133,6 @@ func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) { } func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) { - if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil { acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) } diff --git 
a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go index 2df56c2ae97cd..6129c2d95c079 100644 --- a/plugins/inputs/sflow/sflow_test.go +++ b/plugins/inputs/sflow/sflow_test.go @@ -29,7 +29,8 @@ func TestSFlow(t *testing.T) { packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) acc.Wait(2) @@ -129,7 +130,8 @@ func BenchmarkSFlow(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - client.Write(packetBytes) + _, err := client.Write(packetBytes) + require.NoError(b, err) acc.Wait(2) } } diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go index a48857803b40d..7efd59aff0c71 100644 --- a/plugins/inputs/sflow/types.go +++ b/plugins/inputs/sflow/types.go @@ -6,13 +6,8 @@ import ( ) const ( - AddressTypeIPv6 uint32 = 2 // sflow_version_5.txt line: 1384 - AddressTypeIPv4 uint32 = 1 // sflow_version_5.txt line: 1383 - IPProtocolTCP uint8 = 6 IPProtocolUDP uint8 = 17 - - metricName = "sflow" ) var ETypeMap = map[uint16]string{ @@ -20,11 +15,6 @@ var ETypeMap = map[uint16]string{ 0x86DD: "IPv6", } -var IPvMap = map[uint32]string{ - 1: "IPV4", // sflow_version_5.txt line: 1383 - 2: "IPV6", // sflow_version_5.txt line: 1384 -} - type ContainsMetricData interface { GetTags() map[string]string GetFields() map[string]interface{} @@ -118,12 +108,22 @@ type RawPacketHeaderFlowData struct { } func (h RawPacketHeaderFlowData) GetTags() map[string]string { - t := h.Header.GetTags() + var t map[string]string + if h.Header != nil { + t = h.Header.GetTags() + } else { + t = map[string]string{} + } t["header_protocol"] = HeaderProtocolMap[h.HeaderProtocol] return t } func (h RawPacketHeaderFlowData) GetFields() map[string]interface{} { - f := h.Header.GetFields() + var f map[string]interface{} + if h.Header != nil { + f = h.Header.GetFields() + } else { + f = map[string]interface{}{} + } f["bytes"] = h.Bytes f["frame_length"] = h.FrameLength f["header_length"] = h.HeaderLength @@ -143,14 +143,22 @@ type EthHeader struct { } func (h EthHeader) GetTags() map[string]string { - t := h.IPHeader.GetTags() + var t map[string]string + if h.IPHeader != nil { + t = h.IPHeader.GetTags() + } else { + t = map[string]string{} + } t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String() t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String() t["ether_type"] = h.EtherType return t } func (h EthHeader) GetFields() map[string]interface{} { - return h.IPHeader.GetFields() + if h.IPHeader != nil { + return h.IPHeader.GetFields() + } + return map[string]interface{}{} } type 
ProtocolHeader ContainsMetricData diff --git a/plugins/inputs/sflow/types_test.go b/plugins/inputs/sflow/types_test.go new file mode 100644 index 0000000000000..d59ac0ae23941 --- /dev/null +++ b/plugins/inputs/sflow/types_test.go @@ -0,0 +1,43 @@ +package sflow + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRawPacketHeaderFlowData(t *testing.T) { + h := RawPacketHeaderFlowData{ + HeaderProtocol: HeaderProtocolTypeEthernetISO88023, + FrameLength: 64, + Bytes: 64, + StrippedOctets: 0, + HeaderLength: 0, + Header: nil, + } + tags := h.GetTags() + fields := h.GetFields() + + require.NotNil(t, fields) + require.NotNil(t, tags) + require.Contains(t, tags, "header_protocol") + require.Equal(t, 1, len(tags)) +} + +// process a raw ethernet packet without any encapsulated protocol +func TestEthHeader(t *testing.T) { + h := EthHeader{ + DestinationMAC: [6]byte{0xca, 0xff, 0xee, 0xff, 0xe, 0x0}, + SourceMAC: [6]byte{0xde, 0xad, 0xbe, 0xef, 0x0, 0x0}, + TagProtocolIdentifier: 0x88B5, // IEEE Std 802 - Local Experimental Ethertype + TagControlInformation: 0, + EtherTypeCode: 0, + EtherType: "", + IPHeader: nil, + } + tags := h.GetTags() + fields := h.GetFields() + + require.NotNil(t, fields) + require.NotNil(t, tags) +} diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index adc23f0921e26..b0f189d69fbf9 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -12,32 +12,36 @@ import ( "sync" "syscall" "time" - "unicode" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -const IntelVID = "0x8086" +const intelVID = "0x8086" var ( // Device Model: APPLE SSD SM256E // Product: HUH721212AL5204 // Model Number: TS128GMTE850 - modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$") + modelInfo = regexp.MustCompile(`^(Device Model|Product|Model Number):\s+(.*)$`) // Serial Number: S0X5NZBC422720 - serialInfo = regexp.MustCompile("(?i)^Serial Number:\\s+(.*)$") + serialInfo = regexp.MustCompile(`(?i)^Serial Number:\s+(.*)$`) // LU WWN Device Id: 5 002538 655584d30 - wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$") + wwnInfo = regexp.MustCompile(`^LU WWN Device Id:\s+(.*)$`) // User Capacity: 251,000,193,024 bytes [251 GB] - userCapacityInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$") + userCapacityInfo = regexp.MustCompile(`^User Capacity:\s+([0-9,]+)\s+bytes.*$`) // SMART support is: Enabled - smartEnabledInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$") + smartEnabledInfo = regexp.MustCompile(`^SMART support is:\s+(\w+)$`) + // Power mode is: ACTIVE or IDLE or Power mode was: STANDBY + powermodeInfo = regexp.MustCompile(`^Power mode \w+:\s+(\w+)`) + // Device is in STANDBY mode + standbyInfo = regexp.MustCompile(`^Device is in\s+(\w+)`) // SMART overall-health self-assessment test result: PASSED // SMART Health Status: OK // PASSED, FAILED, UNKNOWN - smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$") + smartOverallHealth = regexp.MustCompile(`^(SMART overall-health self-assessment test result|SMART Health Status):\s+(\w+).*$`) // sasNvmeAttr is a SAS or NVME SMART attribute sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`) @@ -46,7 +50,7 @@ var ( // 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 // 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 // 192 
Power-Off_Retract_Count -O--C- 097 097 000 - 14716 - attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9-]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$") + attribute = regexp.MustCompile(`^\s*([0-9]+)\s(\S+)\s+([-P][-O][-S][-R][-C][-K])\s+([0-9]+)\s+([0-9]+)\s+([0-9-]+)\s+([-\w]+)\s+([\w\+\.]+).*$`) // Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff // key normalized raw @@ -55,7 +59,7 @@ var ( // vid : 0x8086 // sn : CFGT53260XSP8011P - nvmeIdCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`) + nvmeIDCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`) deviceFieldIds = map[string]string{ "1": "read_error_rate", @@ -267,27 +271,28 @@ var ( } ) -type NVMeDevice struct { +// Smart plugin reads metrics from storage devices supporting S.M.A.R.T. +type Smart struct { + Path string `toml:"path"` //deprecated - to keep backward compatibility + PathSmartctl string `toml:"path_smartctl"` + PathNVMe string `toml:"path_nvme"` + Nocheck string `toml:"nocheck"` + EnableExtensions []string `toml:"enable_extensions"` + Attributes bool `toml:"attributes"` + Excludes []string `toml:"excludes"` + Devices []string `toml:"devices"` + UseSudo bool `toml:"use_sudo"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` +} + +type nvmeDevice struct { name string vendorID string model string serialNumber string } -type Smart struct { - Path string `toml:"path"` //deprecated - to keep backward compatibility - PathSmartctl string `toml:"path_smartctl"` - PathNVMe string `toml:"path_nvme"` - Nocheck string `toml:"nocheck"` - EnableExtensions []string `toml:"enable_extensions"` - Attributes bool `toml:"attributes"` - Excludes []string `toml:"excludes"` - Devices []string `toml:"devices"` - UseSudo bool `toml:"use_sudo"` - Timeout internal.Duration `toml:"timeout"` - Log telegraf.Logger `toml:"-"` -} - var sampleConfig = ` ## Optionally specify the path to the smartctl executable # path_smartctl = "/usr/bin/smartctl" @@ -330,20 +335,23 @@ var sampleConfig = ` # timeout = "30s" ` -func NewSmart() *Smart { +func newSmart() *Smart { return &Smart{ - Timeout: internal.Duration{Duration: time.Second * 30}, + Timeout: config.Duration(time.Second * 30), } } +// SampleConfig returns sample configuration for this plugin. func (m *Smart) SampleConfig() string { return sampleConfig } +// Description returns the plugin description. func (m *Smart) Description() string { return "Read metrics from storage devices supporting S.M.A.R.T." } +// Init performs one time setup of the plugin and returns an error if the configuration is invalid. func (m *Smart) Init() error { //if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist if len(m.Path) > 0 && len(m.PathSmartctl) == 0 { @@ -377,6 +385,7 @@ func (m *Smart) Init() error { return nil } +// Gather takes in an accumulator and adds the metrics that the SMART tools gather. 
func (m *Smart) Gather(acc telegraf.Accumulator) error { var err error var scannedNVMeDevices []string @@ -387,8 +396,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { isVendorExtension := len(m.EnableExtensions) != 0 if len(m.Devices) != 0 { - devicesFromConfig = excludeWrongDeviceNames(devicesFromConfig) - m.getAttributes(acc, devicesFromConfig) // if nvme-cli is present, vendor specific attributes can be gathered @@ -418,31 +425,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { return nil } -// validate and exclude not correct config device names to avoid unwanted behaviours -func excludeWrongDeviceNames(devices []string) []string { - validSigns := map[string]struct{}{ - " ": {}, - "/": {}, - "\\": {}, - "-": {}, - ",": {}, - } - var wrongDevices []string - - for _, device := range devices { - for _, char := range device { - if unicode.IsLetter(char) || unicode.IsNumber(char) { - continue - } - if _, exist := validSigns[string(char)]; exist { - continue - } - wrongDevices = append(wrongDevices, device) - } - } - return difference(devices, wrongDevices) -} - func (m *Smart) scanAllDevices(ignoreExcludes bool) ([]string, []string, error) { // this will return all devices (including NVMe devices) for smartctl version >= 7.0 // for older versions this will return non NVMe devices @@ -500,12 +482,12 @@ func (m *Smart) scanDevices(ignoreExcludes bool, scanArgs ...string) ([]string, } // Wrap with sudo -var runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { +var runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { cmd := exec.Command(command, args...) if sudo { cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...) } - return internal.CombinedOutputTimeout(cmd, timeout.Duration) + return internal.CombinedOutputTimeout(cmd, time.Duration(timeout)) } func excludedDev(excludes []string, deviceLine string) bool { @@ -540,11 +522,11 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri for _, device := range NVMeDevices { if contains(m.EnableExtensions, "auto-on") { switch device.vendorID { - case IntelVID: + case intelVID: wg.Add(1) go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) } - } else if contains(m.EnableExtensions, "Intel") && device.vendorID == IntelVID { + } else if contains(m.EnableExtensions, "Intel") && device.vendorID == intelVID { wg.Add(1) go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) } @@ -552,8 +534,8 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri wg.Wait() } -func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout internal.Duration, useSudo bool) []NVMeDevice { - var NVMeDevices []NVMeDevice +func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout config.Duration, useSudo bool) []nvmeDevice { + var NVMeDevices []nvmeDevice for _, device := range devices { vid, sn, mn, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo) @@ -561,7 +543,7 @@ func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme acc.AddError(fmt.Errorf("cannot find device info for %s device", device)) continue } - newDevice := NVMeDevice{ + newDevice := nvmeDevice{ name: device, vendorID: vid, model: mn, @@ -572,7 +554,7 @@ func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme return NVMeDevices } -func 
gatherNVMeDeviceInfo(nvme, device string, timeout internal.Duration, useSudo bool) (string, string, string, error) {
+func gatherNVMeDeviceInfo(nvme, device string, timeout config.Duration, useSudo bool) (string, string, string, error) {
 	args := []string{"id-ctrl"}
 	args = append(args, strings.Split(device, " ")...)
 	out, err := runCmd(timeout, useSudo, nvme, args...)
@@ -593,7 +575,7 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) {
 	for scanner.Scan() {
 		line := scanner.Text()
 
-		if matches := nvmeIdCtrlExpressionPattern.FindStringSubmatch(line); len(matches) > 2 {
+		if matches := nvmeIDCtrlExpressionPattern.FindStringSubmatch(line); len(matches) > 2 {
 			matches[1] = strings.TrimSpace(matches[1])
 			matches[2] = strings.TrimSpace(matches[2])
 			if matches[1] == "vid" {
@@ -612,7 +594,7 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) {
 	return vid, sn, mn, nil
 }
 
-func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo bool, nvme string, device NVMeDevice, wg *sync.WaitGroup) {
+func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo bool, nvme string, device nvmeDevice, wg *sync.WaitGroup) {
 	defer wg.Done()
 
 	args := []string{"intel", "smart-log-add"}
@@ -659,7 +641,7 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, us
 	}
 }
 
-func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) {
+func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) {
 	defer wg.Done()
 	// smartctl 5.41 & 5.42 are broken regarding handling of --nocheck/-n
 	args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"}
@@ -715,11 +697,24 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co
 			deviceFields["health_ok"] = health[2] == "PASSED" || health[2] == "OK"
 		}
 
+		// Check whether a power mode is reported. If not, look for "Device is
+		// in STANDBY", which happens when nocheck is set to standby (smartctl
+		// exits early so the disk is not spun up). Otherwise nothing is found
+		// and no power tag is set (NVMe does not report a power mode).
+		if power := powermodeInfo.FindStringSubmatch(line); len(power) > 1 {
+			deviceTags["power"] = power[1]
+		} else {
+			if power := standbyInfo.FindStringSubmatch(line); len(power) > 1 {
+				deviceTags["power"] = power[1]
+			}
+		}
+
 		tags := map[string]string{}
 		fields := make(map[string]interface{})
 
 		if collectAttributes {
-			keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled"}
+			// add the power mode tag
+			keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled", "power"}
 			for _, key := range keys {
 				if value, ok := deviceTags[key]; ok {
 					tags[key] = value
@@ -966,7 +961,7 @@ func parseTemperature(fields, deviceFields map[string]interface{}, str string) e
 	return nil
 }
 
-func parseTemperatureSensor(fields, deviceFields map[string]interface{}, str string) error {
+func parseTemperatureSensor(fields, _ map[string]interface{}, str string) error {
 	var temp int64
 	if _, err := fmt.Sscanf(str, "%d C", &temp); err != nil {
 		return err
@@ -993,7 +988,7 @@ func init() {
 	_ = os.Setenv("LC_NUMERIC", "en_US.UTF-8")
 
 	inputs.Add("smart", func() telegraf.Input {
-		m := NewSmart()
+		m := newSmart()
 		m.Nocheck = "standby"
 		return m
 	})
diff --git a/plugins/inputs/smart/smart_test.go 
b/plugins/inputs/smart/smart_test.go index 00d8cf0725ea7..5a1799381cebe 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -7,19 +7,19 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGatherAttributes(t *testing.T) { - s := NewSmart() + s := newSmart() s.Attributes = true - assert.Equal(t, time.Second*30, s.Timeout.Duration) + assert.Equal(t, time.Second*30, time.Duration(s.Timeout)) - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--info" && args[7] == "/dev/ada0" { return []byte(mockInfoAttributeData), nil @@ -78,12 +78,12 @@ func TestGatherAttributes(t *testing.T) { } func TestGatherNoAttributes(t *testing.T) { - s := NewSmart() + s := newSmart() s.Attributes = false - assert.Equal(t, time.Second*30, s.Timeout.Duration) + assert.Equal(t, time.Second*30, time.Duration(s.Timeout)) - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--scan" && len(args) == 1 { return []byte(mockScanData), nil @@ -124,7 +124,7 @@ func TestExcludedDev(t *testing.T) { } func TestGatherSATAInfo(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData), nil } @@ -134,13 +134,13 @@ func TestGatherSATAInfo(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSATAInfo65(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData65), nil } @@ -150,13 +150,13 @@ func TestGatherSATAInfo65(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHgstSAS(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSASInfoData), nil } @@ -166,13 +166,13 @@ func TestGatherHgstSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 6, acc.NFields(), "Wrong number of 
fields gathered") assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHtSAS(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(htSASInfoData), nil } @@ -182,13 +182,13 @@ func TestGatherHtSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) testutil.RequireMetricsEqual(t, testHtsasAtributtes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } func TestGatherSSD(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdInfoData), nil } @@ -198,13 +198,13 @@ func TestGatherSSD(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSSDRaid(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdRaidInfoData), nil } @@ -214,13 +214,13 @@ func TestGatherSSDRaid(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherNvme(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(smartctlNvmeInfoData), nil } @@ -230,21 +230,21 @@ func TestGatherNvme(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "nvme0", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "nvme0", wg) testutil.RequireMetricsEqual(t, testSmartctlNvmeAttributes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } func TestGatherIntelNvme(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(nvmeIntelInfoData), nil } var ( acc = &testutil.Accumulator{} wg = &sync.WaitGroup{} - device = NVMeDevice{ + device = nvmeDevice{ name: "nvme0", model: mockModel, serialNumber: mockSerial, @@ -252,7 +252,7 @@ func TestGatherIntelNvme(t *testing.T) { ) wg.Add(1) - gatherIntelNVMeDisk(acc, internal.Duration{Duration: time.Second * 30}, true, "", device, wg) + gatherIntelNVMeDisk(acc, config.Duration(time.Second*30), true, "", device, wg) result := acc.GetTelegrafMetrics() testutil.RequireMetricsEqual(t, 
testIntelInvmeAttributes, result, @@ -275,13 +275,6 @@ func Test_checkForNVMeDevices(t *testing.T) { assert.Equal(t, expectedNVMeDevices, resultNVMeDevices) } -func Test_excludeWrongDeviceNames(t *testing.T) { - devices := []string{"/dev/sda", "/dev/nvme -d nvme", "/dev/sda1 -d megaraid,1", "/dev/sda ; ./suspicious_script.sh"} - validDevices := []string{"/dev/sda", "/dev/nvme -d nvme", "/dev/sda1 -d megaraid,1"} - result := excludeWrongDeviceNames(devices) - assert.Equal(t, validDevices, result) -} - func Test_contains(t *testing.T) { devices := []string{"/dev/sda", "/dev/nvme1"} device := "/dev/nvme1" @@ -299,7 +292,7 @@ func Test_difference(t *testing.T) { } func Test_integerOverflow(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(smartctlNvmeInfoDataWithOverflow), nil } @@ -310,7 +303,7 @@ func Test_integerOverflow(t *testing.T) { t.Run("If data raw_value is out of int64 range, there should be no metrics for that attribute", func(t *testing.T) { wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "nvme0", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "nvme0", wg) result := acc.GetTelegrafMetrics() testutil.RequireMetricsEqual(t, testOverflowAttributes, result, diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index a0c9155db5432..0d52881a72f04 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -22,8 +22,13 @@ information. ```toml [[inputs.snmp]] ## Agent addresses to retrieve values from. + ## format: agents = ["<scheme://><hostname>:<port>"] + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## default is udp + ## port: optional ## example: agents = ["udp://127.0.0.1:161"] ## agents = ["tcp://127.0.0.1:161"] + ## agents = ["udp4://v4only-snmp-agent"] agents = ["udp://127.0.0.1:161"] ## Timeout for each request. @@ -48,7 +53,7 @@ information. ## ## Security Name. # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". + ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512", or "". # auth_protocol = "MD5" ## Authentication password. # auth_password = "pass" @@ -56,7 +61,9 @@ information. # sec_level = "authNoPriv" ## Context Name. # context_name = "" - ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". + ### Protocols "AES192", "AES192C", "AES256", and "AES256C" require the underlying net-snmp tools + ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) # priv_protocol = "" ## Privacy password used for encrypted messages. # priv_password = "" @@ -113,15 +120,21 @@ option operate similar to the `snmpget` utility. # is_tag = false ## Apply one of the following conversions to the variable value: - ## float(X) Convert the input value into a float and divides by the - ## Xth power of 10. Effectively just moves the decimal left - ## X places. For example a value of `123` with `float(2)` - ## will result in `1.23`. - ## float: Convert the value into a float with no adjustment. Same - ## as `float(0)`. - ## int: Convert the value into an integer. - ## hwaddr: Convert the value to a MAC address. - ## ipaddr: Convert the value to an IP address.
+ ## float(X): Convert the input value into a float and divide by the + ## Xth power of 10. Effectively just moves the decimal left + ## X places. For example a value of `123` with `float(2)` + ## will result in `1.23`. + ## float: Convert the value into a float with no adjustment. Same + ## as `float(0)`. + ## int: Convert the value into an integer. + ## hwaddr: Convert the value to a MAC address. + ## ipaddr: Convert the value to an IP address. + ## hextoint:X:Y: Convert a hex string value to integer, where X is the endianness + ## and Y the bit size. For example: hextoint:LittleEndian:uint64 + ## or hextoint:BigEndian:uint32. Valid options for the endianness are + ## BigEndian and LittleEndian. For the bit size: uint16, uint32 + ## and uint64. + ## # conversion = "" ``` @@ -184,6 +197,10 @@ One [metric][] is created for each row of the SNMP table. ## path segments). Truncates the index after this point to remove non-fixed ## value or length index suffixes. # oid_index_length = 0 + + ## Specifies if the value of the given field should be snmptranslated; + ## by default no field values are translated + # translate = true ``` ### Troubleshooting diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 103b23d214485..7f2df6b689eac 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -3,6 +3,8 @@ package snmp import ( "bufio" "bytes" + "encoding/binary" + "errors" "fmt" "log" "math" @@ -13,19 +15,24 @@ import ( "sync" "time" + "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/wlog" - "github.com/soniah/gosnmp" ) const description = `Retrieves SNMP values from remote agents` const sampleConfig = ` ## Agent addresses to retrieve values from. + ## format: agents = ["<scheme://><hostname>:<port>"] + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## default is udp + ## port: optional ## example: agents = ["udp://127.0.0.1:161"] ## agents = ["tcp://127.0.0.1:161"] + ## agents = ["udp4://v4only-snmp-agent"] agents = ["udp://127.0.0.1:161"] ## Timeout for each request. @@ -50,7 +57,7 @@ const sampleConfig = ` ## ## Security Name. # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". + ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512", or "". # auth_protocol = "MD5" ## Authentication password. # auth_password = "pass" @@ -166,6 +173,12 @@ type Table struct { // Init() builds & initializes the nested fields. func (t *Table) Init() error { + // make sure either oid or name is set in the config file, + // otherwise snmp will produce metrics with an empty name + if t.Oid == "" && t.Name == "" { + return fmt.Errorf("SNMP table in config file is not named. One or both of the oid and name settings must be set") + } + if t.initialized { return nil } @@ -237,6 +250,8 @@ type Field struct { // "hwaddr" will convert a 6-byte string to a MAC address. // "ipaddr" will convert the value to an IPv4 or IPv6 address.
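// "hextoint:X:Y" will decode a raw byte string with encoding/binary, where X
// is the endianness ("BigEndian" or "LittleEndian") and Y the bit size
// ("uint16", "uint32" or "uint64"). For example, "hextoint:BigEndian:uint32"
// turns the bytes {0x00, 0x09, 0x3E, 0xE3} into uint32(605923), per the
// conversion test vectors added in snmp_test.go below.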
Conversion string + // Translate tells if the value of the field should be snmptranslated + Translate bool initialized bool } @@ -247,20 +262,22 @@ func (f *Field) init() error { return nil } - _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) - if err != nil { - return fmt.Errorf("translating: %w", err) - } - f.Oid = oidNum - if f.Name == "" { - f.Name = oidText - } - if f.Conversion == "" { - f.Conversion = conversion + // check if oid needs translation or name is not set + if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { + _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) + if err != nil { + return fmt.Errorf("translating: %w", err) + } + f.Oid = oidNum + if f.Name == "" { + f.Name = oidText + } + if f.Conversion == "" { + f.Conversion = conversion + } + //TODO use textual convention conversion from the MIB } - //TODO use textual convention conversion from the MIB - f.initialized = true return nil } @@ -304,7 +321,7 @@ func init() { ClientConfig: snmp.ClientConfig{ Retries: 3, MaxRepetitions: 10, - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), Version: 2, Community: "public", }, @@ -424,7 +441,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // empty string. This results in all the non-table fields sharing the same // index, and being added on the same row. if pkt, err := gs.Get([]string{oid}); err != nil { - return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) { + return nil, fmt.Errorf("unknown security level (sec_level)") + } else if errors.Is(err, gosnmp.ErrUnknownUsername) { + return nil, fmt.Errorf("unknown username (sec_name)") + } else if errors.Is(err, gosnmp.ErrWrongDigest) { + return nil, fmt.Errorf("wrong digest (auth_protocol, auth_password)") + } else if errors.Is(err, gosnmp.ErrDecryption) { + return nil, fmt.Errorf("decryption error (priv_protocol, priv_password)") + } else { + return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + } } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { ent := pkt.Variables[0] fv, err := fieldConvert(f.Conversion, ent.Value) @@ -451,7 +478,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { i := f.OidIndexLength + 1 // leading separator idx = strings.Map(func(r rune) rune { if r == '.' { - i -= 1 + i-- } if i < 1 { return -1 @@ -460,6 +487,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { }, idx) } + // snmptranslate table field value here + if f.Translate { + if entOid, ok := ent.Value.(string); ok { + _, _, oidText, _, err := SnmpTranslate(entOid) + if err == nil { + // If no error translating, the original value for ent.Value should be replaced + ent.Value = oidText + } + } + } + fv, err := fieldConvert(f.Conversion, ent.Value) if err != nil { return &walkError{ @@ -546,7 +584,8 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { if err != nil { return nil, err } - gs.SetAgent(agent) + + err = gs.SetAgent(agent) if err != nil { return nil, err } @@ -561,12 +600,6 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { } // fieldConvert converts from any type according to the conv specification -// "float"/"float(0)" will convert the value into a float. 
-// "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit. -// "int" will convert the value into an integer. -// "hwaddr" will convert the value into a MAC address. -// "ipaddr" will convert the value into into an IP address. -// "" will convert a byte slice into a string. func fieldConvert(conv string, v interface{}) (interface{}, error) { if conv == "" { if bs, ok := v.([]byte); ok { @@ -627,7 +660,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case int32: v = int64(vt) case int64: - v = int64(vt) + v = vt case uint: v = int64(vt) case uint8: @@ -658,6 +691,45 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { return v, nil } + split := strings.Split(conv, ":") + if split[0] == "hextoint" && len(split) == 3 { + endian := split[1] + bit := split[2] + + bv, ok := v.([]byte) + if !ok { + return v, nil + } + + if endian == "LittleEndian" { + switch bit { + case "uint64": + v = binary.LittleEndian.Uint64(bv) + case "uint32": + v = binary.LittleEndian.Uint32(bv) + case "uint16": + v = binary.LittleEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + } else if endian == "BigEndian" { + switch bit { + case "uint64": + v = binary.BigEndian.Uint64(bv) + case "uint32": + v = binary.BigEndian.Uint32(bv) + case "uint16": + v = binary.BigEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + } else { + return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) + } + + return v, nil + } + if conv == "ipaddr" { var ipbs []byte @@ -810,28 +882,6 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err } -func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - if snmpTranslateCaches == nil { - snmpTranslateCaches = map[string]snmpTranslateCache{} - } - - var stc snmpTranslateCache - stc.mibName = mibName - stc.oidNum = oidNum - stc.oidText = oidText - stc.conversion = conversion - stc.err = nil - snmpTranslateCaches[oid] = stc -} - -func SnmpTranslateClear() { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - snmpTranslateCaches = map[string]snmpTranslateCache{} -} - func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { var out []byte if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go index c09dd004580da..f87f9029b0d06 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -1,3 +1,4 @@ +//go:build generate // +build generate package main @@ -23,6 +24,7 @@ var mockedCommands = [][]string{ {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"}, {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, {"snmptranslate", "-Td", "-Ob", "TEST::server"}, diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 
56d9326f1d639..1927db23246b4 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -24,17 +24,17 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { // This is not a real test. This is just a way of mocking out commands. // // Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568 -func TestMockExecCommand(t *testing.T) { +func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { - if string(arg) == "--" { + if arg == "--" { cmd = []string{} continue } if cmd == nil { continue } - cmd = append(cmd, string(arg)) + cmd = append(cmd, arg) } if cmd == nil { return @@ -44,10 +44,13 @@ func TestMockExecCommand(t *testing.T) { mcr, ok := mockedCommandResults[cmd0] if !ok { cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix + //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "Unmocked command. Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) os.Exit(1) } + //nolint:errcheck,revive fmt.Printf("%s", mcr.stdout) + //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "%s", mcr.stderr) if mcr.exitError { os.Exit(1) @@ -69,6 +72,7 @@ var mockedCommandResults = map[string]mockedCommandResult{ "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.7": {stdout: "TEST::testTableEntry.7\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) std(0) testOID(0) testTable(0) testTableEntry(1) 7 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 583b2dc847282..f447f13c54e67 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -9,13 +9,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" - config "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" - "github.com/soniah/gosnmp" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" ) @@ -81,6 +80,7 @@ var tsc = &testSNMPConnection{ ".1.0.0.1.3": []byte("byte slice"), ".1.0.0.2.1.5.0.9.9": 11, ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", }, } @@ -92,8 +92,8 @@ func TestSampleConfig(t *testing.T) { expected := &Snmp{ Agents: []string{"udp://127.0.0.1:161"}, AgentHostTag: "", - ClientConfig: config.ClientConfig{ - Timeout: internal.Duration{Duration: 5 * time.Second}, + ClientConfig: snmp.ClientConfig{ + Timeout: config.Duration(5 * time.Second), Version: 2, Community: "public", MaxRepetitions: 10, @@ -199,11 +199,12 @@ func TestSnmpInit_noTranslate(t *testing.T) { {Oid: ".1.1.1.3"}, }, Tables: []Table{ - {Fields: []Field{ - {Oid: ".1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6"}, - }}, + {Name: "testing", + Fields: []Field{ + {Oid: ".1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".1.1.1.5", Name: "five"}, + {Oid: ".1.1.1.6"}, + }}, }, } @@ -235,11 +236,26 @@ func TestSnmpInit_noTranslate(t *testing.T) { assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) } +func TestSnmpInit_noName_noOid(t *testing.T) { + s := &Snmp{ + Tables: []Table{ + {Fields: []Field{ + {Oid: ".1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".1.1.1.5", Name: "five"}, + {Oid: ".1.1.1.6"}, + }}, + }, + } + + err := s.init() + require.Error(t, err) +} + func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, - ClientConfig: config.ClientConfig{ - Timeout: internal.Duration{Duration: 3 * time.Second}, + ClientConfig: snmp.ClientConfig{ + Timeout: config.Duration(3 * time.Second), Retries: 4, Version: 2, Community: "foo", @@ -307,7 +323,7 @@ func stubTCPServer(wg *sync.WaitGroup) { func TestGetSNMPConnection_v3(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -344,6 +360,125 @@ func TestGetSNMPConnection_v3(t *testing.T) { assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) } +func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { + testCases := []struct { + Name string + Algorithm gosnmp.SnmpV3PrivProtocol + Config *Snmp + }{ + { + Name: "AES192", + Algorithm: gosnmp.AES192, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES192", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + { + Name: "AES192C", + Algorithm: gosnmp.AES192C, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES192C", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + { + Name: "AES256", + Algorithm: gosnmp.AES256, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES256", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + { + 
Name: "AES256C", + Algorithm: gosnmp.AES256C, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES256C", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + s := tc.Config + err := s.init() + require.NoError(t, err) + + gsc, err := s.getConnection(0) + require.NoError(t, err) + gs := gsc.(snmp.GosnmpWrapper) + assert.Equal(t, gs.Version, gosnmp.Version3) + sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) + assert.Equal(t, "1.2.3.4", gsc.Host()) + assert.EqualValues(t, 20, gs.MaxRepetitions) + assert.Equal(t, "mycontext", gs.ContextName) + assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + assert.Equal(t, "myuser", sp.UserName) + assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + assert.Equal(t, "password123", sp.AuthenticationPassphrase) + assert.Equal(t, tc.Algorithm, sp.PrivacyProtocol) + assert.Equal(t, "password123", sp.PrivacyPassphrase) + assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) + assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + }) + } +} + func TestGetSNMPConnection_caching(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, @@ -368,8 +503,8 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { t.Skip("Skipping test due to random failures.") } srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) - defer srvr.Close() require.NoError(t, err) + defer srvr.Close() reqCount := 0 // Set up a WaitGroup to wait for the server goroutine to exit and protect // reqCount. @@ -387,7 +522,10 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { } reqCount++ - srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + // will cause decoding error + if _, err := srvr.WriteTo([]byte{'X'}, addr); err != nil { + return + } } }() @@ -407,7 +545,7 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { GoSNMP: gs, } err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) - srvr.Close() + assert.NoError(t, srvr.Close()) wg.Wait() assert.Error(t, err) assert.False(t, gs.Conn == conn) @@ -418,8 +556,8 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { // TODO: Fix this test t.Skip("Test failing too often, skip for now and revisit later.") srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) - defer srvr.Close() require.NoError(t, err) + defer srvr.Close() reqCount := 0 // Set up a WaitGroup to wait for the server goroutine to exit and protect // reqCount. 
@@ -437,7 +575,10 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { } reqCount++ - srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + // will cause decoding error + if _, err := srvr.WriteTo([]byte{'X'}, addr); err != nil { + return + } } }() @@ -457,7 +598,7 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { GoSNMP: gs, } _, err = gsw.Get([]string{".1.0.0"}) - srvr.Close() + require.NoError(t, srvr.Close()) wg.Wait() assert.Error(t, err) assert.False(t, gs.Conn == conn) @@ -493,6 +634,16 @@ func TestTableBuild_walk(t *testing.T) { Oid: ".1.0.0.2.1.5", OidIndexLength: 1, }, + { + Name: "myfield6", + Oid: ".1.0.0.0.1.6", + Translate: true, + }, + { + Name: "myfield7", + Oid: ".1.0.0.0.1.6", + Translate: false, + }, }, } @@ -510,6 +661,8 @@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(0.123), "myfield4": 11, "myfield5": 11, + "myfield6": "testTableEntry.7", + "myfield7": ".1.0.0.0.1.7", }, } rtr2 := RTableRow{ @@ -628,7 +781,7 @@ func TestGather(t *testing.T) { acc := &testutil.Accumulator{} tstart := time.Now() - s.Gather(acc) + require.NoError(t, s.Gather(acc)) tstop := time.Now() require.Len(t, acc.Metrics, 2) @@ -640,8 +793,8 @@ func TestGather(t *testing.T) { assert.Len(t, m.Fields, 2) assert.Equal(t, 234, m.Fields["myfield2"]) assert.Equal(t, "baz", m.Fields["myfield3"]) - assert.True(t, tstart.Before(m.Time)) - assert.True(t, tstop.After(m.Time)) + assert.True(t, !tstart.After(m.Time)) + assert.True(t, !tstop.Before(m.Time)) m2 := acc.Metrics[1] assert.Equal(t, "myOtherTable", m2.Measurement) @@ -675,7 +828,7 @@ func TestGather_host(t *testing.T) { acc := &testutil.Accumulator{} - s.Gather(acc) + require.NoError(t, s.Gather(acc)) require.Len(t, acc.Metrics, 1) m := acc.Metrics[0] @@ -726,6 +879,12 @@ func TestFieldConvert(t *testing.T) { {[]byte("abcd"), "ipaddr", "97.98.99.100"}, {"abcd", "ipaddr", "97.98.99.100"}, {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:BigEndian:uint64", uint64(2602423610063712)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:BigEndian:uint32", uint32(605923)}, + {[]byte{0x00, 0x09}, "hextoint:BigEndian:uint16", uint16(9)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:LittleEndian:uint64", uint64(6934371307618175232)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:LittleEndian:uint32", uint32(3812493568)}, + {[]byte{0x00, 0x09}, "hextoint:LittleEndian:uint16", uint16(2304)}, } for _, tc := range testTable { diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md index 06bebbcad6176..8e639900ffe0f 100644 --- a/plugins/inputs/snmp_legacy/README.md +++ b/plugins/inputs/snmp_legacy/README.md @@ -1,5 +1,7 @@ # SNMP Legacy Input Plugin +### Deprecated in version 1.0. Use [SNMP input plugin][]. 
+ The SNMP input plugin gathers metrics from SNMP agents ### Configuration: @@ -547,3 +549,5 @@ ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 ``` + +[SNMP input plugin]: /plugins/inputs/snmp diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 62a3966fa451a..604a2205c0d2c 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,9 +1,9 @@ package snmp_legacy import ( - "io/ioutil" "log" "net" + "os" "strconv" "strings" "time" @@ -11,7 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) // Snmp is a snmp plugin @@ -102,7 +102,7 @@ type Data struct { // Unit Unit string // SNMP getbulk max repetition - MaxRepetition uint8 `toml:"max_repetition"` + MaxRepetition uint32 `toml:"max_repetition"` // SNMP Instance (default 0) // (only used with GET request and if // OID is a name from snmptranslate file) @@ -230,23 +230,23 @@ func (s *Snmp) Description() string { return `DEPRECATED! PLEASE USE inputs.snmp INSTEAD.` } -func fillnode(parentNode Node, oid_name string, ids []string) { +func fillnode(parentNode Node, oidName string, ids []string) { // ids = ["1", "3", "6", ...] id, ids := ids[0], ids[1:] node, ok := parentNode.subnodes[id] - if ok == false { + if !ok { node = Node{ id: id, name: "", subnodes: make(map[string]Node), } if len(ids) == 0 { - node.name = oid_name + node.name = oidName } parentNode.subnodes[id] = node } if len(ids) > 0 { - fillnode(node, oid_name, ids) + fillnode(node, oidName, ids) } } @@ -268,7 +268,7 @@ func findnodename(node Node, ids []string) (string, string) { return node.name, "0" } else if node.name != "" && len(ids) == 0 && id != "0" { // node with an instance - return node.name, string(id) + return node.name, id } else if node.name != "" && len(ids) > 0 { // node with subinstances return node.name, strings.Join(ids, ".") @@ -296,19 +296,19 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { subnodes: make(map[string]Node), } - data, err := ioutil.ReadFile(s.SnmptranslateFile) + data, err := os.ReadFile(s.SnmptranslateFile) if err != nil { s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err - } else { - for _, line := range strings.Split(string(data), "\n") { - oids := strings.Fields(string(line)) - if len(oids) == 2 && oids[1] != "" { - oid_name := oids[0] - oid := oids[1] - fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) - s.nameToOid[oid_name] = oid - } + } + + for _, line := range strings.Split(string(data), "\n") { + oids := strings.Fields(line) + if len(oids) == 2 && oids[1] != "" { + oidName := oids[0] + oid := oids[1] + fillnode(s.initNode, oidName, strings.Split(oid, ".")) + s.nameToOid[oidName] = oid } } } @@ -339,7 +339,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } else { oid.Name = oidstring oid.Oid = oidstring - if string(oidstring[:1]) != "." { + if oidstring[:1] != "." { oid.rawOid = "." 
+ oidstring } else { oid.rawOid = oidstring @@ -348,10 +348,10 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { host.getOids = append(host.getOids, oid) } - for _, oid_name := range host.Collect { + for _, oidName := range host.Collect { // Get GET oids for _, oid := range s.Get { - if oid.Name == oid_name { + if oid.Name == oidName { if val, ok := s.nameToOid[oid.Oid]; ok { // TODO should we add the 0 instance ? if oid.Instance != "" { @@ -367,7 +367,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } // Get GETBULK oids for _, oid := range s.Bulk { - if oid.Name == oid_name { + if oid.Name == oidName { if val, ok := s.nameToOid[oid.Oid]; ok { oid.rawOid = "." + val } else { @@ -395,7 +395,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // to do it only the first time // only if len(s.OidInstanceMapping) == 0 if len(host.OidInstanceMapping) >= 0 { - if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { + if err := host.SNMPMap(s.nameToOid, s.subTableMap); err != nil { s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error()) continue } @@ -412,7 +412,6 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } func (h *Host) SNMPMap( - acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable, ) error { @@ -473,15 +472,15 @@ func (h *Host) SNMPMap( // We need to query this table // To get mapping between instance id // and instance name - oid_asked := table.mappingTable - oid_next := oid_asked - need_more_requests := true + oidAsked := table.mappingTable + oidNext := oidAsked + needMoreRequests := true // Set max repetition - maxRepetition := uint8(32) + maxRepetition := uint32(32) // Launch requests - for need_more_requests { + for needMoreRequests { // Launch request - result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition) + result, err3 := snmpClient.GetBulk([]string{oidNext}, 0, maxRepetition) if err3 != nil { return err3 } @@ -489,7 +488,7 @@ func (h *Host) SNMPMap( lastOid := "" for _, variable := range result.Variables { lastOid = variable.Name - if strings.HasPrefix(variable.Name, oid_asked) { + if strings.HasPrefix(variable.Name, oidAsked) { switch variable.Type { // handle instance names case gosnmp.OctetString: @@ -519,7 +518,7 @@ func (h *Host) SNMPMap( // remove oid table from the complete oid // in order to get the current instance id - key := strings.Replace(variable.Name, oid_asked, "", 1) + key := strings.Replace(variable.Name, oidAsked, "", 1) if len(table.subTables) == 0 { // We have a mapping table @@ -581,11 +580,11 @@ func (h *Host) SNMPMap( } } // Determine if we need more requests - if strings.HasPrefix(lastOid, oid_asked) { - need_more_requests = true - oid_next = lastOid + if strings.HasPrefix(lastOid, oidAsked) { + needMoreRequests = true + oidNext = lastOid } else { - need_more_requests = false + needMoreRequests = false } } } @@ -617,15 +616,15 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { // gosnmp.MAX_OIDS == 60 // TODO use gosnmp.MAX_OIDS instead of hard coded value - max_oids := 60 + maxOids := 60 // limit 60 (MAX_OIDS) oids by requests - for i := 0; i < len(oidsList); i = i + max_oids { + for i := 0; i < len(oidsList); i = i + maxOids { // Launch request - max_index := i + max_oids - if i+max_oids > len(oidsList) { - max_index = len(oidsList) + maxIndex := i + maxOids + if i+maxOids > len(oidsList) { + maxIndex = len(oidsList) } - result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to 
g.MAX_OIDS + result, err3 := snmpClient.Get(oidsNameList[i:maxIndex]) // Get() accepts up to g.MAX_OIDS if err3 != nil { return err3 } @@ -658,31 +657,31 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error { // TODO Trying to make requests with more than one OID // to reduce the number of requests for _, oid := range oidsNameList { - oid_asked := oid - need_more_requests := true + oidAsked := oid + needMoreRequests := true // Set max repetition maxRepetition := oidsList[oid].MaxRepetition if maxRepetition <= 0 { maxRepetition = 32 } // Launch requests - for need_more_requests { + for needMoreRequests { // Launch request result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition) if err3 != nil { return err3 } // Handle response - last_oid, err := h.HandleResponse(oidsList, result, acc, initNode) + lastOid, err := h.HandleResponse(oidsList, result, acc, initNode) if err != nil { return err } // Determine if we need more requests - if strings.HasPrefix(last_oid, oid_asked) { - need_more_requests = true - oid = last_oid + if strings.HasPrefix(lastOid, oidAsked) { + needMoreRequests = true + oid = lastOid } else { - need_more_requests = false + needMoreRequests = false } } } @@ -700,16 +699,16 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { version = gosnmp.Version2c } // Prepare host and port - host, port_str, err := net.SplitHostPort(h.Address) + host, portStr, err := net.SplitHostPort(h.Address) if err != nil { - port_str = string("161") + portStr = string("161") } // convert port_str to port in uint16 - port_64, err := strconv.ParseUint(port_str, 10, 16) + port64, err := strconv.ParseUint(portStr, 10, 16) if err != nil { return nil, err } - port := uint16(port_64) + port := uint16(port64) // Get SNMP client snmpClient := &gosnmp.GoSNMP{ Target: host, @@ -739,7 +738,7 @@ func (h *Host) HandleResponse( lastOid = variable.Name nextresult: // Get only oid wanted - for oid_key, oid := range oids { + for oidKey, oid := range oids { // Skip oids already processed for _, processedOid := range h.processedOids { if variable.Name == processedOid { @@ -750,7 +749,7 @@ func (h *Host) HandleResponse( // OR // the result is SNMP table which "." comes right after oid_key. 
// ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1 - if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") { + if variable.Name == oidKey || strings.HasPrefix(variable.Name, oidKey+".") { switch variable.Type { // handle Metrics case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32, @@ -761,19 +760,19 @@ func (h *Host) HandleResponse( tags["unit"] = oid.Unit } // Get name and instance - var oid_name string + var oidName string var instance string // Get oidname and instance from translate file - oid_name, instance = findnodename(initNode, - strings.Split(string(variable.Name[1:]), ".")) + oidName, instance = findnodename(initNode, + strings.Split(variable.Name[1:], ".")) // Set instance tag // From mapping table - mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key] + mapping, inMappingNoSubTable := h.OidInstanceMapping[oidKey] if inMappingNoSubTable { // filter if the instance in not in // OidInstanceMapping mapping map - if instance_name, exists := mapping[instance]; exists { - tags["instance"] = instance_name + if instanceName, exists := mapping[instance]; exists { + tags["instance"] = instanceName } else { continue } @@ -788,24 +787,24 @@ func (h *Host) HandleResponse( } // Set name - var field_name string - if oid_name != "" { + var fieldName string + if oidName != "" { // Set fieldname as oid name from translate file - field_name = oid_name + fieldName = oidName } else { // Set fieldname as oid name from inputs.snmp.get section // Because the result oid is equal to inputs.snmp.get section - field_name = oid.Name + fieldName = oid.Name } tags["snmp_host"], _, _ = net.SplitHostPort(h.Address) fields := make(map[string]interface{}) - fields[string(field_name)] = variable.Value + fields[fieldName] = variable.Value h.processedOids = append(h.processedOids, variable.Name) - acc.AddFields(field_name, fields, tags) + acc.AddFields(fieldName, fields, tags) case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: // Oid not found - log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key) + log.Printf("E! [inputs.snmp_legacy] oid %q not found", oidKey) default: // delete other data } diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index 0680376c400db..f117c35cbeb56 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -31,6 +31,10 @@ information. ## 1024. 
See README.md for details ## # service_address = "udp://:162" + ## + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## ## Timeout running snmptranslate command # timeout = "5s" ## Snmp version diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index d380d582bad66..9fffd8968d593 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -1,27 +1,24 @@ package snmp_trap import ( - "bufio" - "bytes" "fmt" "net" - "os/exec" + "os" + "path/filepath" "strconv" "strings" - "sync" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/types" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) -var defaultTimeout = internal.Duration{Duration: time.Second * 5} - -type handler func(*gosnmp.SnmpPacket, *net.UDPAddr) -type execer func(internal.Duration, string, ...string) ([]byte, error) +var defaultTimeout = config.Duration(time.Second * 5) type mibEntry struct { mibName string @@ -29,9 +26,10 @@ type mibEntry struct { } type SnmpTrap struct { - ServiceAddress string `toml:"service_address"` - Timeout internal.Duration `toml:"timeout"` - Version string `toml:"version"` + ServiceAddress string `toml:"service_address"` + Timeout config.Duration `toml:"timeout"` + Version string `toml:"version"` + Path []string `toml:"path"` // Settings for version 3 // Values: "noAuthNoPriv", "authNoPriv", "authPriv" @@ -44,19 +42,15 @@ type SnmpTrap struct { PrivProtocol string `toml:"priv_protocol"` PrivPassword string `toml:"priv_password"` - acc telegraf.Accumulator - listener *gosnmp.TrapListener - timeFunc func() time.Time - errCh chan error + acc telegraf.Accumulator + listener *gosnmp.TrapListener + timeFunc func() time.Time + lookupFunc func(string) (mibEntry, error) + errCh chan error - makeHandlerWrapper func(handler) handler + makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc Log telegraf.Logger `toml:"-"` - - cacheLock sync.Mutex - cache map[string]mibEntry - - execCmd execer } var sampleConfig = ` @@ -68,6 +62,10 @@ var sampleConfig = ` ## 1024. See README.md for details ## # service_address = "udp://:162" + ## + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## ## Timeout running snmptranslate command # timeout = "5s" ## Snmp version, defaults to 2c @@ -104,6 +102,7 @@ func init() { inputs.Add("snmp_trap", func() telegraf.Input { return &SnmpTrap{ timeFunc: time.Now, + lookupFunc: lookup, ServiceAddress: "udp://:162", Timeout: defaultTimeout, Version: "2c", @@ -111,20 +110,50 @@ func init() { }) } -func realExecCmd(Timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { - cmd := exec.Command(arg0, args...) 
- var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) +func (s *SnmpTrap) Init() error { + // must init, append path for each directory, load module for every file + // or gosmi will fail without saying why + gosmi.Init() + err := s.getMibsPath() if err != nil { - return nil, err + s.Log.Errorf("Could not get path %v", err) } - return out.Bytes(), nil + return nil } -func (s *SnmpTrap) Init() error { - s.cache = map[string]mibEntry{} - s.execCmd = realExecCmd +func (s *SnmpTrap) getMibsPath() error { + var folders []string + for _, mibPath := range s.Path { + gosmi.AppendPath(mibPath) + folders = append(folders, mibPath) + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + if info.Mode()&os.ModeSymlink != 0 { + s, _ := os.Readlink(path) + folders = append(folders, s) + } + return nil + }) + if err != nil { + s.Log.Errorf("Filepath could not be walked %v", err) + } + for _, folder := range folders { + err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + gosmi.AppendPath(path) + } else if info.Mode()&os.ModeSymlink == 0 { + _, err := gosmi.LoadModule(info.Name()) + if err != nil { + s.Log.Errorf("Module could not be loaded %v", err) + } + } + return nil + }) + if err != nil { + s.Log.Errorf("Filepath could not be walked %v", err) + } + } + folders = []string{} + } return nil } @@ -206,7 +235,6 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { AuthenticationPassphrase: s.AuthPassword, AuthenticationProtocol: authenticationProtocol, } - } // wrap the handler, used in unit tests @@ -249,6 +277,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { func (s *SnmpTrap) Stop() { s.listener.Close() + defer gosmi.Exit() err := <-s.errCh if nil != err { s.Log.Errorf("Error stopping trap listener %v", err) @@ -261,7 +290,7 @@ func setTrapOid(tags map[string]string, oid string, e mibEntry) { tags["mib"] = e.mibName } -func makeTrapHandler(s *SnmpTrap) handler { +func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) { tm := s.timeFunc() fields := map[string]interface{}{} @@ -282,9 +311,9 @@ func makeTrapHandler(s *SnmpTrap) handler { } if trapOid != "" { - e, err := s.lookup(trapOid) + e, err := s.lookupFunc(trapOid) if err != nil { - s.Log.Errorf("Error resolving V1 OID: %v", err) + s.Log.Errorf("Error resolving V1 OID, oid=%s, source=%s: %v", trapOid, tags["source"], err) return } setTrapOid(tags, trapOid, e) @@ -320,9 +349,9 @@ func makeTrapHandler(s *SnmpTrap) handler { var e mibEntry var err error - e, err = s.lookup(val) + e, err = s.lookupFunc(val) if nil != err { - s.Log.Errorf("Error resolving value OID: %v", err) + s.Log.Errorf("Error resolving value OID, oid=%s, source=%s: %v", val, tags["source"], err) return } @@ -338,9 +367,9 @@ func makeTrapHandler(s *SnmpTrap) handler { value = v.Value } - e, err := s.lookup(v.Name) + e, err := s.lookupFunc(v.Name) if nil != err { - s.Log.Errorf("Error resolving OID: %v", err) + s.Log.Errorf("Error resolving OID oid=%s, source=%s: %v", v.Name, tags["source"], err) return } @@ -367,48 +396,16 @@ func makeTrapHandler(s *SnmpTrap) handler { } } -func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - var ok bool - if e, ok = s.cache[oid]; !ok { - // cache miss. 
exec snmptranslate - e, err = s.snmptranslate(oid) - if err == nil { - s.cache[oid] = e - } - return e, err - } - return e, nil -} - -func (s *SnmpTrap) clear() { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - s.cache = map[string]mibEntry{} -} - -func (s *SnmpTrap) load(oid string, e mibEntry) { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - s.cache[oid] = e -} - -func (s *SnmpTrap) snmptranslate(oid string) (e mibEntry, err error) { - var out []byte - out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) +func lookup(oid string) (e mibEntry, err error) { + var node gosmi.SmiNode + node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + // ensure modules are loaded or node will be empty (might not error) if err != nil { return e, err } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if err = scanner.Err(); !ok && err != nil { - return e, err - } - - e.oidText = scanner.Text() + e.oidText = node.RenderQualified() i := strings.Index(e.oidText, "::") if i == -1 { diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index b5f8da27aa7b3..f917a7bbff918 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -3,152 +3,133 @@ package snmp_trap import ( "fmt" "net" + "path/filepath" "strconv" "strings" "testing" "time" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestLoad(t *testing.T) { - s := &SnmpTrap{} - require.Nil(t, s.Init()) - - defer s.clear() - s.load( - ".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "SNMPv2-MIB", - "coldStart", - }, - ) - - e, err := s.lookup(".1.3.6.1.6.3.1.1.5.1") - require.NoError(t, err) - require.Equal(t, "SNMPv2-MIB", e.mibName) - require.Equal(t, "coldStart", e.oidText) -} +func newMsgFlagsV3(secLevel string) gosnmp.SnmpV3MsgFlags { + var msgFlags gosnmp.SnmpV3MsgFlags + switch strings.ToLower(secLevel) { + case "noauthnopriv", "": + msgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + msgFlags = gosnmp.AuthNoPriv + case "authpriv": + msgFlags = gosnmp.AuthPriv + default: + msgFlags = gosnmp.NoAuthNoPriv + } -func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { - return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) + return msgFlags } -func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, secLevel string, username string, authProto string, authPass string, privProto string, privPass string, contextName string, engineID string) { - var s gosnmp.GoSNMP +func newUsmSecurityParametersForV3(authProto string, privProto string, username string, privPass string, authPass string) *gosnmp.UsmSecurityParameters { + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(authProto) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + authenticationProtocol = gosnmp.NoAuth + } - if version == gosnmp.Version3 { - var msgFlags gosnmp.SnmpV3MsgFlags - switch 
strings.ToLower(secLevel) { - case "noauthnopriv", "": - msgFlags = gosnmp.NoAuthNoPriv - case "authnopriv": - msgFlags = gosnmp.AuthNoPriv - case "authpriv": - msgFlags = gosnmp.AuthPriv - default: - msgFlags = gosnmp.NoAuthNoPriv - } + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(privProto) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + privacyProtocol = gosnmp.NoPriv + } - var authenticationProtocol gosnmp.SnmpV3AuthProtocol - switch strings.ToLower(authProto) { - case "md5": - authenticationProtocol = gosnmp.MD5 - case "sha": - authenticationProtocol = gosnmp.SHA - //case "sha224": - // authenticationProtocol = gosnmp.SHA224 - //case "sha256": - // authenticationProtocol = gosnmp.SHA256 - //case "sha384": - // authenticationProtocol = gosnmp.SHA384 - //case "sha512": - // authenticationProtocol = gosnmp.SHA512 - case "": - authenticationProtocol = gosnmp.NoAuth - default: - authenticationProtocol = gosnmp.NoAuth - } + return &gosnmp.UsmSecurityParameters{ + AuthoritativeEngineID: "1", + AuthoritativeEngineBoots: 1, + AuthoritativeEngineTime: 1, + UserName: username, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: privPass, + AuthenticationPassphrase: authPass, + AuthenticationProtocol: authenticationProtocol, + } +} - var privacyProtocol gosnmp.SnmpV3PrivProtocol - switch strings.ToLower(privProto) { - case "aes": - privacyProtocol = gosnmp.AES - case "des": - privacyProtocol = gosnmp.DES - case "aes192": - privacyProtocol = gosnmp.AES192 - case "aes192c": - privacyProtocol = gosnmp.AES192C - case "aes256": - privacyProtocol = gosnmp.AES256 - case "aes256c": - privacyProtocol = gosnmp.AES256C - case "": - privacyProtocol = gosnmp.NoPriv - default: - privacyProtocol = gosnmp.NoPriv - } +func newGoSNMPV3(port uint16, contextName string, engineID string, msgFlags gosnmp.SnmpV3MsgFlags, sp *gosnmp.UsmSecurityParameters) gosnmp.GoSNMP { + return gosnmp.GoSNMP{ + Port: port, + Version: gosnmp.Version3, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + SecurityParameters: sp, + SecurityModel: gosnmp.UserSecurityModel, + MsgFlags: msgFlags, + ContextName: contextName, + ContextEngineID: engineID, + } +} - sp := &gosnmp.UsmSecurityParameters{ - AuthoritativeEngineID: "1", - AuthoritativeEngineBoots: 1, - AuthoritativeEngineTime: 1, - UserName: username, - PrivacyProtocol: privacyProtocol, - PrivacyPassphrase: privPass, - AuthenticationPassphrase: authPass, - AuthenticationProtocol: authenticationProtocol, - } - s = gosnmp.GoSNMP{ - Port: port, - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 1, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - SecurityParameters: sp, - SecurityModel: gosnmp.UserSecurityModel, - MsgFlags: msgFlags, - ContextName: contextName, - ContextEngineID: engineID, - } - } else { - s = gosnmp.GoSNMP{ - Port: port, - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 1, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - Community: "public", - } +func newGoSNMP(version gosnmp.SnmpVersion, port uint16) gosnmp.GoSNMP { + return gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, 
+ MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + Community: "public", } } - err := s.Connect() +func sendTrap(t *testing.T, goSNMP gosnmp.GoSNMP, trap gosnmp.SnmpTrap) { + err := goSNMP.Connect() if err != nil { t.Errorf("Connect() err: %v", err) } - defer s.Conn.Close() + defer goSNMP.Conn.Close() - _, err = s.SendTrap(trap) + _, err = goSNMP.SendTrap(trap) if err != nil { t.Errorf("SendTrap() err: %v", err) } } func TestReceiveTrap(t *testing.T) { - var now uint32 - now = 123123123 - - var fakeTime time.Time - fakeTime = time.Unix(456456456, 456) + now := uint32(123123123) + fakeTime := time.Unix(456456456, 456) type entry struct { oid string @@ -1266,7 +1247,7 @@ func TestReceiveTrap(t *testing.T) { // Hook into the trap handler so the test knows when the // trap has been received received := make(chan int) - wrap := func(f handler) handler { + wrap := func(f gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc { return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { f(p, a) received <- 0 @@ -1280,6 +1261,15 @@ timeFunc: func() time.Time { return fakeTime }, + lookupFunc: func(input string) (mibEntry, error) { + for _, entry := range tt.entries { + if input == entry.oid { + return mibEntry{entry.e.mibName, entry.e.oidText}, nil + } + } + return mibEntry{}, fmt.Errorf("unexpected oid") + }, + // answer with the matching test entry for a known oid, error otherwise Log: testutil.Logger{}, Version: tt.version.String(), SecName: tt.secName, @@ -1289,21 +1279,24 @@ PrivProtocol: tt.privProto, PrivPassword: tt.privPass, } - require.Nil(t, s.Init()) - // Don't look up oid with snmptranslate. - s.execCmd = fakeExecCmd + + require.NoError(t, s.Init()) + var acc testutil.Accumulator require.Nil(t, s.Start(&acc)) defer s.Stop() - // Preload the cache with the oids we'll use in this test - // so snmptranslate and mibs don't need to be installed. - for _, entry := range tt.entries { - s.load(entry.oid, entry.e) + var goSNMP gosnmp.GoSNMP + if tt.version == gosnmp.Version3 { + msgFlags := newMsgFlagsV3(tt.secLevel) + sp := newUsmSecurityParametersForV3(tt.authProto, tt.privProto, tt.secName, tt.privPass, tt.authPass) + goSNMP = newGoSNMPV3(port, tt.contextName, tt.engineID, msgFlags, sp) + } else { + goSNMP = newGoSNMP(tt.version, port) } // Send the trap - sendTrap(t, port, now, tt.trap, tt.version, tt.secLevel, tt.secName, tt.authProto, tt.authPass, tt.privProto, tt.privPass, tt.contextName, tt.engineID) + sendTrap(t, goSNMP, tt.trap) // Wait for trap to be received select { @@ -1320,3 +1313,96 @@ } } + +func TestGosmiSingleMib(t *testing.T) { + // We would prefer to specify port 0 and let the network + // stack choose an unused port for us but TrapListener + // doesn't have a way to return the autoselected port. + // Instead, we'll use an unusual port and hope it's + // unused.
+ const port = 12399 + + // Hook into the trap handler so the test knows when the + // trap has been received + received := make(chan int) + wrap := func(f gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + fakeTime := time.Unix(456456456, 456) + now := uint32(123123123) + + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + trap := gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + } + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "2c", + "source": "127.0.0.1", + "community": "public", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + } + + // Set up the service input plugin + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + lookupFunc: lookup, + Log: testutil.Logger{}, + Version: "2c", + Path: []string{testDataPath}, + } + require.NoError(t, s.Init()) + + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + goSNMP := newGoSNMP(gosnmp.Version2c, port) + + // Send the trap + sendTrap(t, goSNMP, trap) + + // Wait for trap to be received + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + // Verify plugin output + testutil.RequireMetricsEqual(t, + metrics, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) +} diff --git a/plugins/inputs/snmp_trap/testdata/test.mib b/plugins/inputs/snmp_trap/testdata/test.mib new file mode 100644 index 0000000000000..d8ff17af04eba --- /dev/null +++ b/plugins/inputs/snmp_trap/testdata/test.mib @@ -0,0 +1,40 @@ +SNMPv2-MIB DEFINITIONS ::= BEGIN + +IMPORTS + NOTIFICATION-TYPE, NOTIFICATION-GROUP + FROM test2; + + +snmpMIB MODULE-IDENTITY + LAST-UPDATED "2021060900Z" + ORGANIZATION "testing" + CONTACT-INFO + "EMail: testing@emai.com" + DESCRIPTION + "MIB module for testing snmp_trap plugin + for telegraf + " + ::={ coldStart 1 } + +snmpMIBObjects OBJECT IDENTIFIER ::= { snmpMIB 1 } + +system OBJECT IDENTIFIER ::= { sysUpTimeInstance 1 } + +coldStart NOTIFICATION-TYPE + STATUS current + DESCRIPTION + "A coldStart trap signifies that the SNMP entity, + supporting a notification originator application, is + reinitializing itself and that its configuration may + have been altered." + ::= { snmpTraps 1 } + +snmpBasicNotificationsGroup NOTIFICATION-GROUP + NOTIFICATIONS { coldStart, authenticationFailure } + STATUS current + DESCRIPTION + "The basic notifications implemented by an SNMP entity + supporting command responder applications." 
+ ::= { snmpMIBGroups 7 } + +END diff --git a/plugins/inputs/snmp_trap/testdata/test2 b/plugins/inputs/snmp_trap/testdata/test2 new file mode 100644 index 0000000000000..e4950b902d803 --- /dev/null +++ b/plugins/inputs/snmp_trap/testdata/test2 @@ -0,0 +1,97 @@ +SNMPv2-MIB DEFINITIONS ::= BEGIN + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +sysUpTimeInstance OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { sysUpTimeInstance 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +coldStart OBJECT IDENTIFIER ::= { snmpV2 3 } + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +NOTIFICATION-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + NotificationsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + NotificationsPart ::= + "NOTIFICATIONS" "{" Notifications "}" + Notifications ::= + Notification + | Notifications "," Notification + Notification ::= + value(NotificationName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index e412996f38e6e..362316ee5c468 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -13,6 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -47,9 +48,12 @@ func (ssl *streamSocketListener) listen() { break } - if ssl.ReadBufferSize.Size > 0 { + if ssl.ReadBufferSize > 0 { if srb, ok := c.(setReadBufferer); ok { - srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(ssl.ReadBufferSize)); err != nil { + ssl.Log.Error(err.Error()) + break + } } else { ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType) } @@ -58,6 +62,8 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Lock() if ssl.MaxConnections > 0 && len(ssl.connections) >= ssl.MaxConnections { ssl.connectionsMtx.Unlock() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() continue } @@ -77,6 +83,8 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Lock() for _, c := range 
ssl.connections { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() } ssl.connectionsMtx.Unlock() @@ -92,13 +100,13 @@ func (ssl *streamSocketListener) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(ssl.ServiceAddress, "://", 2)[0]) } - if ssl.KeepAlivePeriod.Duration == 0 { + if *ssl.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(ssl.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*ssl.KeepAlivePeriod)) } func (ssl *streamSocketListener) removeConnection(c net.Conn) { @@ -114,12 +122,16 @@ func (ssl *streamSocketListener) read(c net.Conn) { decoder, err := internal.NewStreamContentDecoder(ssl.ContentEncoding, c) if err != nil { ssl.Log.Error("Read error: %v", err) + return } scnr := bufio.NewScanner(decoder) for { - if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 { - c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)) + if ssl.ReadTimeout != nil && *ssl.ReadTimeout > 0 { + if err := c.SetReadDeadline(time.Now().Add(time.Duration(*ssl.ReadTimeout))); err != nil { + ssl.Log.Errorf("setting read deadline failed: %v", err) + return + } } if !scnr.Scan() { break @@ -182,13 +194,13 @@ func (psl *packetSocketListener) listen() { } type SocketListener struct { - ServiceAddress string `toml:"service_address"` - MaxConnections int `toml:"max_connections"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` - ReadTimeout *internal.Duration `toml:"read_timeout"` - KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` - SocketMode string `toml:"socket_mode"` - ContentEncoding string `toml:"content_encoding"` + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + ReadTimeout *config.Duration `toml:"read_timeout"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + ContentEncoding string `toml:"content_encoding"` tlsint.ServerConfig wg sync.WaitGroup @@ -288,6 +300,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { // no good way of testing for "file does not exist". // Instead just ignore error and blow up when we try to listen, which will // indicate "address already in use" if file existed and we couldn't remove.
+ //nolint:errcheck,revive os.Remove(addr) } @@ -318,7 +331,9 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - os.Chmod(spl[1], os.FileMode(uint32(i))) + if err := os.Chmod(spl[1], os.FileMode(uint32(i))); err != nil { + return err + } } ssl := &streamSocketListener{ @@ -353,12 +368,16 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - os.Chmod(spl[1], os.FileMode(uint32(i))) + if err := os.Chmod(spl[1], os.FileMode(uint32(i))); err != nil { + return err + } } - if sl.ReadBufferSize.Size > 0 { + if sl.ReadBufferSize > 0 { if srb, ok := pc.(setReadBufferer); ok { - srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(sl.ReadBufferSize)); err != nil { + sl.Log.Warnf("Setting read buffer on a %s socket failed: %v", protocol, err) + } } else { sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol) } @@ -417,6 +436,8 @@ func udpListen(network string, address string) (net.PacketConn, error) { func (sl *SocketListener) Stop() { if sl.Closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive sl.Close() sl.Closer = nil } @@ -438,7 +459,9 @@ type unixCloser struct { func (uc unixCloser) Close() error { err := uc.closer.Close() - os.Remove(uc.path) // ignore error + // Ignore the error if e.g. the file does not exist + //nolint:errcheck,revive + os.Remove(uc.path) return err } diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index a46add15cf61b..a3ccacae1ceb2 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -4,14 +4,15 @@ import ( "bytes" "crypto/tls" "io" - "io/ioutil" "log" "net" "os" "path/filepath" + "runtime" "testing" "time" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/wlog" @@ -67,7 +68,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { } func TestSocketListener_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") @@ -98,7 +99,7 @@ func TestSocketListener_tcp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -117,7 +118,7 @@ func TestSocketListener_udp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -131,18 +132,19 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") defer testEmptyLog(t)() - os.Create(sock) + f, _ := os.Create(sock) + require.NoError(t, f.Close()) sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc 
:= &testutil.Accumulator{} err = sl.Start(acc) @@ -156,18 +158,23 @@ func TestSocketListener_unix(t *testing.T) { } func TestSocketListener_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") defer testEmptyLog(t)() - os.Create(sock) + _, err = os.Create(sock) + require.NoError(t, err) sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unixgram://" + sock - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -186,7 +193,7 @@ func TestSocketListenerDecode_tcp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) sl.ContentEncoding = "gzip" acc := &testutil.Accumulator{} @@ -206,7 +213,7 @@ func TestSocketListenerDecode_udp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) sl.ContentEncoding = "gzip" acc := &testutil.Accumulator{} @@ -236,9 +243,10 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { require.NoError(t, err) } - client.Write(mstr12) - client.Write(mstr3) - + _, err := client.Write(mstr12) + require.NoError(t, err) + _, err = client.Write(mstr3) + require.NoError(t, err) acc := sl.Accumulator.(*testutil.Accumulator) acc.Wait(3) diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index ce44fa0869c20..08531e7433b34 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -18,10 +19,6 @@ import ( const mbeansPath = "/admin/mbeans?stats=true&wt=json&cat=CORE&cat=QUERYHANDLER&cat=UPDATEHANDLER&cat=CACHE" const adminCoresPath = "/solr/admin/cores?action=STATUS&wt=json" -type node struct { - Host string `json:"host"` -} - const sampleConfig = ` ## specify a list of one or more Solr servers servers = ["http://localhost:8983"] @@ -40,7 +37,7 @@ type Solr struct { Servers []string Username string Password string - HTTPTimeout internal.Duration + HTTPTimeout config.Duration Cores []string client *http.Client } @@ -125,7 +122,7 @@ type Cache struct { // NewSolr return a new instance of Solr func NewSolr() *Solr { return &Solr{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), } } @@ -289,7 +286,6 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa "handler": name}, time, ) - } return nil } @@ -466,11 +462,11 @@ func (s *Solr) mbeansURL(server string, core string) string { func (s *Solr) createHTTPClient() *http.Client { tr := &http.Transport{ - ResponseHeaderTimeout: s.HTTPTimeout.Duration, + ResponseHeaderTimeout: time.Duration(s.HTTPTimeout), } client := &http.Client{ Transport: tr, - Timeout: s.HTTPTimeout.Duration, + Timeout: time.Duration(s.HTTPTimeout), } return client @@ -497,10 +493,8 @@ func (s *Solr) gatherData(url string, v interface{}) 
error { return fmt.Errorf("solr: API responded with status-code %d, expected %d, url %s", r.StatusCode, http.StatusOK, url) } - if err = json.NewDecoder(r.Body).Decode(v); err != nil { - return err - } - return nil + + return json.NewDecoder(r.Body).Decode(v) } func init() { diff --git a/plugins/inputs/solr/solr_test.go b/plugins/inputs/solr/solr_test.go index 270816909c37d..42a6753c9b999 100644 --- a/plugins/inputs/solr/solr_test.go +++ b/plugins/inputs/solr/solr_test.go @@ -104,22 +104,29 @@ func TestNoCoreDataHandling(t *testing.T) { acc.AssertDoesNotContainMeasurement(t, "solr_queryhandler") acc.AssertDoesNotContainMeasurement(t, "solr_updatehandler") acc.AssertDoesNotContainMeasurement(t, "solr_handler") - } func createMockServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansMainResponse) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansCore1Response) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -130,15 +137,23 @@ func createMockNoCoreDataServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, nodata) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, nodata) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -148,15 +163,23 @@ func createMockSolr3Server() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr3MainResponse) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr3MainResponse) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -166,12 +189,18 
@@ func createMockSolr7Server() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr7Response) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md new file mode 100644 index 0000000000000..cc8a464016d28 --- /dev/null +++ b/plugins/inputs/sql/README.md @@ -0,0 +1,154 @@ +# SQL Input Plugin + +This plugin reads metrics by performing SQL queries against a SQL server. Different server +types are supported and their settings might differ (especially the connection parameters). +Please check the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the +`driver` name and the options for the data-source-name (`dsn`). + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage sql`. + +```toml +[[inputs.sql]] + ## Database Driver + ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for + ## a list of supported drivers. + driver = "mysql" + + ## Data source name for connecting + ## The syntax and supported options depend on the selected driver. + dsn = "username:password@mysqlserver:3307/dbname?param=value" + + ## Timeout for any operation + ## Note that the timeout for queries is per query, not per gather. + # timeout = "5s" + + ## Connection time limits + ## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections + ## will not be closed automatically. If you specify a positive time, the connections will be closed after + ## idling or existing for at least that amount of time, respectively. + # connection_max_idle_time = "0s" + # connection_max_life_time = "0s" + + ## Connection count limits + ## By default the number of open connections is not limited and the number of maximum idle connections + ## will be inferred from the number of queries specified. If you specify a positive number for either of the + ## two options, connections will be closed when reaching the specified limit. The number of idle connections + ## will be clipped to the maximum number of connections limit if any. + # connection_max_open = 0 + # connection_max_idle = auto + + [[inputs.sql.query]] + ## Query to perform on the server + query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" + ## Alternatively to specifying the query directly, you can select a file here containing the SQL query. + ## Only one of 'query' and 'query_script' can be specified! + # query_script = "/path/to/sql/script.sql" + + ## Name of the measurement + ## In case both measurement and 'measurement_column' are given, the latter takes precedence. + # measurement = "sql" + + ## Column name containing the name of the measurement + ## If given, this will take precedence over the 'measurement' setting. In case a query result + ## does not contain the specified column, we fall back to the 'measurement' setting.
+ # measurement_column = "" + + ## Column name containing the time of the measurement + ## If omitted, the time of the query will be used. + # time_column = "" + + ## Format of the time contained in 'time_column' + ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. + ## See https://golang.org/pkg/time/#Time.Format for details. + # time_format = "unix" + + ## Column names containing tags + ## An empty include list will reject all columns and an empty exclude list will not exclude any column. + ## I.e. by default no columns will be returned as tag and the tags are empty. + # tag_columns_include = [] + # tag_columns_exclude = [] + + ## Column names containing fields (explicit types) + ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + # field_columns_float = [] + # field_columns_int = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] + + ## Column names containing fields (automatic types) + ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty + ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. + ## NOTE: We rely on the database driver to perform automatic datatype conversion. + # field_columns_include = [] + # field_columns_exclude = [] +``` + +### Options +#### Driver +The `driver` and `dsn` options specify how to connect to the database. As the `dsn` format and +values vary with the `driver`, refer to the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for possible values and more details. + +#### Connection limits +With these options you can limit the number of connections kept open by this plugin. Details about the exact +workings can be found in the [golang sql documentation](https://golang.org/pkg/database/sql/#DB.SetConnMaxIdleTime). + +#### Query sections +Multiple `query` sections can be specified for this plugin. Each specified query will first be prepared on the server +and then executed in every interval using the column mappings specified. Please note that `tag` and `field` columns +are not exclusive, i.e. a column can be added to both. When using both `include` and `exclude` lists, the `exclude` +list takes precedence over the `include` list. I.e. if you specify `foo` in both lists, `foo` will _never_ pass +the filter. In case any of the columns specified in `measurement_column` or `time_column` are _not_ returned by the query, +the plugin falls back to the documented defaults. Fields or tags specified in the include options but missing +from the query result are silently ignored. + +### Types +This plugin relies on the driver to do the type conversion. For the different properties of the metric the following +types are accepted. + +#### Measurement +Only columns of type `string` are accepted. + +#### Time +For the metric time, columns of type `time` are accepted directly. For numeric columns, `time_format` should be set +to any of `unix`, `unix_ms`, `unix_ns` or `unix_us` accordingly. By default a timestamp in `unix` format is +expected. For string columns, please specify the `time_format` accordingly. +See the [golang time documentation](https://golang.org/pkg/time/#Time.Format) for details.
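+The conversion sketched below is an illustration only, not the plugin's actual code path
+(which delegates to an internal Telegraf helper); `parseColumnTime` is a hypothetical,
+self-contained stand-in for the rules described above:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// parseColumnTime mirrors the documented rules: time.Time values are used
+// as-is, integers are interpreted according to a unix-style format, and
+// strings are parsed with a Go reference layout. A real driver may also
+// return other integer widths; those are omitted here for brevity.
+func parseColumnTime(value interface{}, format string) (time.Time, error) {
+	switch v := value.(type) {
+	case time.Time:
+		return v, nil
+	case int64:
+		switch format {
+		case "unix":
+			return time.Unix(v, 0), nil
+		case "unix_ms":
+			return time.Unix(0, v*int64(time.Millisecond)), nil
+		case "unix_us":
+			return time.Unix(0, v*int64(time.Microsecond)), nil
+		case "unix_ns":
+			return time.Unix(0, v), nil
+		}
+		return time.Time{}, fmt.Errorf("unknown numeric time format %q", format)
+	case string:
+		return time.Parse(format, v)
+	}
+	return time.Time{}, fmt.Errorf("unsupported time column type %T", value)
+}
+
+func main() {
+	t, _ := parseColumnTime("2021-05-17 22:04:45", "2006-01-02 15:04:05")
+	fmt.Println(t.UTC()) // 2021-05-17 22:04:45 +0000 UTC
+
+	t, _ = parseColumnTime(int64(1611332164), "unix")
+	fmt.Println(t.UTC()) // 2021-01-22 16:16:04 +0000 UTC
+}
+```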
+ +#### Tags +For tags, columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), +floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Those values will be converted to string. + +#### Fields +For fields, columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), +floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Here `bytes` will be converted to `string`, +signed and unsigned integer values will be converted to `int64` or `uint64`, respectively. Floating-point values are converted to `float64` and `time` is converted to a nanosecond timestamp of type `int64`. + +### Example Output +Using the [MariaDB sample database](https://www.mariadbtutorial.com/getting-started/mariadb-sample-database) and the +configuration +```toml +[[inputs.sql]] + driver = "mysql" + dsn = "root:password@/nation" + + [[inputs.sql.query]] + query="SELECT * FROM guests" + measurement = "nation" + tag_columns_include = ["name"] + field_columns_exclude = ["name"] +``` + +Telegraf will output the following metrics +``` +nation,host=Hugin,name=John guest_id=1i 1611332164000000000 +nation,host=Hugin,name=Jane guest_id=2i 1611332164000000000 +nation,host=Hugin,name=Jean guest_id=3i 1611332164000000000 +nation,host=Hugin,name=Storm guest_id=4i 1611332164000000000 +nation,host=Hugin,name=Beast guest_id=5i 1611332164000000000 +``` diff --git a/plugins/inputs/sql/drivers.go b/plugins/inputs/sql/drivers.go new file mode 100644 index 0000000000000..09af9bfc890f8 --- /dev/null +++ b/plugins/inputs/sql/drivers.go @@ -0,0 +1,8 @@ +package sql + +import ( + // Blank imports to register the drivers + _ "github.com/denisenkom/go-mssqldb" + _ "github.com/go-sql-driver/mysql" + _ "github.com/jackc/pgx/v4/stdlib" +) diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go new file mode 100644 index 0000000000000..945e2b8425a3b --- /dev/null +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -0,0 +1,10 @@ +//go:build (linux || freebsd || darwin) && !mips && !mips64 +// +build linux freebsd darwin +// +build !mips +// +build !mips64 + +package sql + +import ( + _ "modernc.org/sqlite" +) diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go new file mode 100644 index 0000000000000..87227663bb4d0 --- /dev/null +++ b/plugins/inputs/sql/sql.go @@ -0,0 +1,540 @@ +package sql + +import ( + "context" + dbsql "database/sql" + "errors" + "fmt" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## Database Driver + ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for + ## a list of supported drivers. + driver = "mysql" + + ## Data source name for connecting + ## The syntax and supported options depend on the selected driver. + dsn = "username:password@mysqlserver:3307/dbname?param=value" + + ## Timeout for any operation + ## Note that the timeout for queries is per query, not per gather. + # timeout = "5s" + + ## Connection time limits + ## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections + ## will not be closed automatically.
If you specify a positive time, the connections will be closed after + ## idling or existing for at least that amount of time, respectively. + # connection_max_idle_time = "0s" + # connection_max_life_time = "0s" + + ## Connection count limits + ## By default the number of open connections is not limited and the number of maximum idle connections + ## will be inferred from the number of queries specified. If you specify a positive number for either of the + ## two options, connections will be closed when reaching the specified limit. The number of idle connections + ## will be clipped to the maximum number of connections limit if any. + # connection_max_open = 0 + # connection_max_idle = auto + + [[inputs.sql.query]] + ## Query to perform on the server + query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" + ## Alternatively to specifying the query directly, you can select a file here containing the SQL query. + ## Only one of 'query' and 'query_script' can be specified! + # query_script = "/path/to/sql/script.sql" + + ## Name of the measurement + ## In case both measurement and 'measurement_column' are given, the latter takes precedence. + # measurement = "sql" + + ## Column name containing the name of the measurement + ## If given, this will take precedence over the 'measurement' setting. In case a query result + ## does not contain the specified column, we fall back to the 'measurement' setting. + # measurement_column = "" + + ## Column name containing the time of the measurement + ## If omitted, the time of the query will be used. + # time_column = "" + + ## Format of the time contained in 'time_column' + ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. + ## See https://golang.org/pkg/time/#Time.Format for details. + # time_format = "unix" + + ## Column names containing tags + ## An empty include list will reject all columns and an empty exclude list will not exclude any column. + ## I.e. by default no columns will be returned as tag and the tags are empty. + # tag_columns_include = [] + # tag_columns_exclude = [] + + ## Column names containing fields (explicit types) + ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + # field_columns_float = [] + # field_columns_int = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] + + ## Column names containing fields (automatic types) + ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty + ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. + ## NOTE: We rely on the database driver to perform automatic datatype conversion.
+ # field_columns_include = [] + # field_columns_exclude = [] +` + +const magicIdleCount int = (-int(^uint(0) >> 1)) + +type Query struct { + Query string `toml:"query"` + Script string `toml:"query_script"` + Measurement string `toml:"measurement"` + MeasurementColumn string `toml:"measurement_column"` + TimeColumn string `toml:"time_column"` + TimeFormat string `toml:"time_format"` + TagColumnsInclude []string `toml:"tag_columns_include"` + TagColumnsExclude []string `toml:"tag_columns_exclude"` + FieldColumnsInclude []string `toml:"field_columns_include"` + FieldColumnsExclude []string `toml:"field_columns_exclude"` + FieldColumnsFloat []string `toml:"field_columns_float"` + FieldColumnsInt []string `toml:"field_columns_int"` + FieldColumnsUint []string `toml:"field_columns_uint"` + FieldColumnsBool []string `toml:"field_columns_bool"` + FieldColumnsString []string `toml:"field_columns_string"` + + statement *dbsql.Stmt + tagFilter filter.Filter + fieldFilter filter.Filter + fieldFilterFloat filter.Filter + fieldFilterInt filter.Filter + fieldFilterUint filter.Filter + fieldFilterBool filter.Filter + fieldFilterString filter.Filter +} + +func (q *Query) parse(ctx context.Context, acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (int, error) { + columnNames, err := rows.Columns() + if err != nil { + return 0, err + } + + // Prepare the list of datapoints according to the received row + columnData := make([]interface{}, len(columnNames)) + columnDataPtr := make([]interface{}, len(columnNames)) + + for i := range columnData { + columnDataPtr[i] = &columnData[i] + } + + rowCount := 0 + for rows.Next() { + measurement := q.Measurement + timestamp := t + tags := make(map[string]string) + fields := make(map[string]interface{}, len(columnNames)) + + // Do the parsing with (hopefully) automatic type conversion + if err := rows.Scan(columnDataPtr...); err != nil { + return 0, err + } + + for i, name := range columnNames { + if q.MeasurementColumn != "" && name == q.MeasurementColumn { + var ok bool + if measurement, ok = columnData[i].(string); !ok { + return 0, fmt.Errorf("measurement column type \"%T\" unsupported", columnData[i]) + } + } + + if q.TimeColumn != "" && name == q.TimeColumn { + var fieldvalue interface{} + var skipParsing bool + + switch v := columnData[i].(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + fieldvalue = v + case []byte: + fieldvalue = string(v) + case time.Time: + timestamp = v + skipParsing = true + case fmt.Stringer: + fieldvalue = v.String() + default: + return 0, fmt.Errorf("time column %q of type \"%T\" unsupported", name, columnData[i]) + } + if !skipParsing { + if timestamp, err = internal.ParseTimestamp(q.TimeFormat, fieldvalue, ""); err != nil { + return 0, fmt.Errorf("parsing time failed: %v", err) + } + } + } + + if q.tagFilter.Match(name) { + tagvalue, err := internal.ToString(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting tag column %q failed: %v", name, err) + } + if v := strings.TrimSpace(tagvalue); v != "" { + tags[name] = v + } + } + + // Explicit type conversions take precedence + if q.fieldFilterFloat.Match(name) { + v, err := internal.ToFloat64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to float failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterInt.Match(name) { + v, err := internal.ToInt64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to int 
failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterUint.Match(name) { + v, err := internal.ToUint64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to uint failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterBool.Match(name) { + v, err := internal.ToBool(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to bool failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterString.Match(name) { + v, err := internal.ToString(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to string failed: %v", name, err) + } + fields[name] = v + continue + } + + // Try automatic conversion for all remaining fields + if q.fieldFilter.Match(name) { + var fieldvalue interface{} + switch v := columnData[i].(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool: + fieldvalue = v + case []byte: + fieldvalue = string(v) + case time.Time: + fieldvalue = v.UnixNano() + case nil: + fieldvalue = nil + case fmt.Stringer: + fieldvalue = v.String() + default: + return 0, fmt.Errorf("field column %q of type \"%T\" unsupported", name, columnData[i]) + } + if fieldvalue != nil { + fields[name] = fieldvalue + } + } + } + acc.AddFields(measurement, fields, tags, timestamp) + rowCount++ + } + + if err := rows.Err(); err != nil { + return rowCount, err + } + + return rowCount, nil +} + +type SQL struct { + Driver string `toml:"driver"` + Dsn string `toml:"dsn"` + Timeout config.Duration `toml:"timeout"` + MaxIdleTime config.Duration `toml:"connection_max_idle_time"` + MaxLifetime config.Duration `toml:"connection_max_life_time"` + MaxOpenConnections int `toml:"connection_max_open"` + MaxIdleConnections int `toml:"connection_max_idle"` + Queries []Query `toml:"query"` + Log telegraf.Logger `toml:"-"` + + driverName string + db *dbsql.DB +} + +func (s *SQL) Description() string { + return `Read metrics from SQL queries` +} + +func (s *SQL) SampleConfig() string { + return sampleConfig +} + +func (s *SQL) Init() error { + // Option handling + if s.Driver == "" { + return errors.New("missing SQL driver option") + } + + if s.Dsn == "" { + return errors.New("missing data source name (DSN) option") + } + + if s.Timeout <= 0 { + s.Timeout = config.Duration(5 * time.Second) + } + + if s.MaxIdleConnections == magicIdleCount { + // Determine the number by the number of queries + the golang default value + s.MaxIdleConnections = len(s.Queries) + 2 + } + + for i, q := range s.Queries { + if q.Query == "" && q.Script == "" { + return errors.New("neither 'query' nor 'query_script' specified") + } + + if q.Query != "" && q.Script != "" { + return errors.New("only one of 'query' and 'query_script' can be specified") + } + + // In case we got a script, we should read the query now. 
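+		// (For example, a hypothetical query_script = "/path/to/sql/script.sql" is read
+		// once here at Init time and from then on treated exactly like an inline 'query'.)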
+ if q.Script != "" { + query, err := os.ReadFile(q.Script) + if err != nil { + return fmt.Errorf("reading script %q failed: %v", q.Script, err) + } + s.Queries[i].Query = string(query) + } + + // Time format + if q.TimeFormat == "" { + s.Queries[i].TimeFormat = "unix" + } + + // Compile the tag-filter + tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false) + if err != nil { + return fmt.Errorf("creating tag filter failed: %v", err) + } + s.Queries[i].tagFilter = tagfilter + + // Compile the explicit type field-filter + fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for float failed: %v", err) + } + s.Queries[i].fieldFilterFloat = fieldfilterFloat + + fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for int failed: %v", err) + } + s.Queries[i].fieldFilterInt = fieldfilterInt + + fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for uint failed: %v", err) + } + s.Queries[i].fieldFilterUint = fieldfilterUint + + fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for bool failed: %v", err) + } + s.Queries[i].fieldFilterBool = fieldfilterBool + + fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for string failed: %v", err) + } + s.Queries[i].fieldFilterString = fieldfilterString + + // Compile the field-filter + fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude) + if err != nil { + return fmt.Errorf("creating field filter failed: %v", err) + } + s.Queries[i].fieldFilter = fieldfilter + + if q.Measurement == "" { + s.Queries[i].Measurement = "sql" + } + } + + // Derive the sql-framework driver name from our config name. This abstracts the actual driver + // from the database-type the user wants. 
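+	// For example, both "postgres" and "cockroach" resolve to the registered "pgx"
+	// driver below, while "maria" and "tidb" reuse the "mysql" driver.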
+ aliases := map[string]string{ + "cockroach": "pgx", + "tidb": "mysql", + "mssql": "sqlserver", + "maria": "mysql", + "postgres": "pgx", + } + s.driverName = s.Driver + if driver, ok := aliases[s.Driver]; ok { + s.driverName = driver + } + + availDrivers := dbsql.Drivers() + if !choice.Contains(s.driverName, availDrivers) { + for d, r := range aliases { + if choice.Contains(r, availDrivers) { + availDrivers = append(availDrivers, d) + } + } + + // Sort the list of drivers and make them unique + sort.Strings(availDrivers) + last := 0 + for _, d := range availDrivers { + if d != availDrivers[last] { + last++ + availDrivers[last] = d + } + } + availDrivers = availDrivers[:last+1] + + return fmt.Errorf("driver %q not supported; use one of %v", s.Driver, availDrivers) + } + + return nil +} + +func (s *SQL) Start(_ telegraf.Accumulator) error { + var err error + + // Connect to the database server + s.Log.Debugf("Connecting to %q...", s.Dsn) + s.db, err = dbsql.Open(s.driverName, s.Dsn) + if err != nil { + return err + } + + // Set the connection limits + // s.db.SetConnMaxIdleTime(time.Duration(s.MaxIdleTime)) // Requires go >= 1.15 + s.db.SetConnMaxLifetime(time.Duration(s.MaxLifetime)) + s.db.SetMaxOpenConns(s.MaxOpenConnections) + s.db.SetMaxIdleConns(s.MaxIdleConnections) + + // Test if the connection can be established + s.Log.Debugf("Testing connectivity...") + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + err = s.db.PingContext(ctx) + cancel() + if err != nil { + return fmt.Errorf("connecting to database failed: %v", err) + } + + // Prepare the statements + for i, q := range s.Queries { + s.Log.Debugf("Preparing statement %q...", q.Query) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + stmt, err := s.db.PrepareContext(ctx, q.Query) //nolint:sqlclosecheck // Closed in Stop() + cancel() + if err != nil { + return fmt.Errorf("preparing query %q failed: %v", q.Query, err) + } + s.Queries[i].statement = stmt + } + + return nil +} + +func (s *SQL) Stop() { + // Free the statements + for _, q := range s.Queries { + if q.statement != nil { + if err := q.statement.Close(); err != nil { + s.Log.Errorf("closing statement for query %q failed: %v", q.Query, err) + } + } + } + + // Close the connection to the server + if s.db != nil { + if err := s.db.Close(); err != nil { + s.Log.Errorf("closing database connection failed: %v", err) + } + } +} + +func (s *SQL) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + tstart := time.Now() + for _, query := range s.Queries { + wg.Add(1) + go func(q Query) { + defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + defer cancel() + if err := s.executeQuery(ctx, acc, q, tstart); err != nil { + acc.AddError(err) + } + }(query) + } + wg.Wait() + s.Log.Debugf("Executed %d queries in %s", len(s.Queries), time.Since(tstart).String()) + + return nil +} + +func init() { + inputs.Add("sql", func() telegraf.Input { + return &SQL{ + MaxIdleTime: config.Duration(0), // unlimited + MaxLifetime: config.Duration(0), // unlimited + MaxOpenConnections: 0, // unlimited + MaxIdleConnections: magicIdleCount, // will trigger auto calculation + } + }) +} + +func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q Query, tquery time.Time) error { + if q.statement == nil { + return fmt.Errorf("statement is nil for query %q", q.Query) + } + + // Execute the query + rows, err := q.statement.QueryContext(ctx) + if err != nil {
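+		// The context carries the configured per-query timeout (see Gather), so a slow
+		// or hung query surfaces here as a context deadline error.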
return err + } + defer rows.Close() + + // Handle the rows + columnNames, err := rows.Columns() + if err != nil { + return err + } + rowCount, err := q.parse(ctx, acc, rows, tquery) + s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query) + + return err +} diff --git a/plugins/inputs/sql/sql_test.go b/plugins/inputs/sql/sql_test.go new file mode 100644 index 0000000000000..35010eeb5ecdf --- /dev/null +++ b/plugins/inputs/sql/sql_test.go @@ -0,0 +1,272 @@ +package sql + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "math/rand" + "path/filepath" + + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func pwgen(n int) string { + charset := []byte("abcdedfghijklmnopqrstABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + nchars := len(charset) + buffer := make([]byte, n) + + for i := range buffer { + buffer[i] = charset[rand.Intn(nchars)] + } + + return string(buffer) +} + +var spinup = flag.Bool("spinup", false, "Spin-up the required test containers") + +func TestMariaDB(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + addr := "127.0.0.1" + port := "3306" + passwd := "" + database := "foo" + + if *spinup { + logger.Infof("Spinning up container...") + + // Generate a random password + passwd = pwgen(32) + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/mariadb") + require.NoError(t, err, "determining absolute path of test-data failed") + + // Spin-up the container + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "mariadb", + Env: map[string]string{ + "MYSQL_ROOT_PASSWORD": passwd, + "MYSQL_DATABASE": database, + }, + BindMounts: map[string]string{ + testdata: "/docker-entrypoint-initdb.d", + }, + ExposedPorts: []string{"3306/tcp"}, + WaitingFor: wait.ForListeningPort("3306/tcp"), + }, + Started: true, + } + container, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, container.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + addr, err = container.Host(ctx) + require.NoError(t, err, "getting container host address failed") + p, err := container.MappedPort(ctx, "3306/tcp") + require.NoError(t, err, "getting container host port failed") + port = p.Port() + } + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "2006-01-02 15:04:05", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "maria", + Dsn: fmt.Sprintf("root:%s@tcp(%s:%s)/%s", passwd, addr, port, database), + 
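+				// (go-sql-driver DSN shape, e.g. "root:<passwd>@tcp(127.0.0.1:3306)/foo"; the
+				// "maria" driver name above is an alias the plugin maps to "mysql")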
Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} + +func TestPostgreSQL(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + addr := "127.0.0.1" + port := "5432" + passwd := "" + database := "foo" + + if *spinup { + logger.Infof("Spinning up container...") + + // Generate a random password + passwd = pwgen(32) + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/postgres") + require.NoError(t, err, "determining absolute path of test-data failed") + + // Spin-up the container + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "postgres", + Env: map[string]string{ + "POSTGRES_PASSWORD": passwd, + "POSTGRES_DB": database, + }, + BindMounts: map[string]string{ + testdata: "/docker-entrypoint-initdb.d", + }, + ExposedPorts: []string{"5432/tcp"}, + WaitingFor: wait.ForListeningPort("5432/tcp"), + }, + Started: true, + } + container, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, container.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + addr, err = container.Host(ctx) + require.NoError(t, err, "getting container host address failed") + p, err := container.MappedPort(ctx, "5432/tcp") + require.NoError(t, err, "getting container host port failed") + port = p.Port() + } + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "2006-01-02 15:04:05", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "pgx", + Dsn: fmt.Sprintf("postgres://postgres:%v@%v:%v/%v", passwd, addr, port, database), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/sql/testdata/mariadb/expected.sql b/plugins/inputs/sql/testdata/mariadb/expected.sql new file mode 100644 index 0000000000000..49a3095db4da2 --- /dev/null +++ b/plugins/inputs/sql/testdata/mariadb/expected.sql @@ -0,0 +1,36 @@ +/*!40101 SET 
@saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `bar` ( + `baz` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `bar` VALUES (1); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric three` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag four` text DEFAULT NULL, + `string two` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric three` VALUES ('2021-05-17 22:04:45','tag4','string2'); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_one` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_one` text DEFAULT NULL, + `tag_two` text DEFAULT NULL, + `int64_one` int(11) DEFAULT NULL, + `int64_two` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_two` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_three` text DEFAULT NULL, + `string_one` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_two` VALUES ('2021-05-17 22:04:45','tag3','string1'); diff --git a/plugins/inputs/sql/testdata/postgres/expected.sql b/plugins/inputs/sql/testdata/postgres/expected.sql new file mode 100644 index 0000000000000..8bc2b2fc83018 --- /dev/null +++ b/plugins/inputs/sql/testdata/postgres/expected.sql @@ -0,0 +1,41 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; +SET default_tablespace = ''; +SET default_table_access_method = heap; +CREATE TABLE public."metric three" ( + "timestamp" timestamp without time zone, + "tag four" text, + "string two" text +); +ALTER TABLE public."metric three" OWNER TO postgres; +CREATE TABLE public.metric_one ( + "timestamp" timestamp without time zone, + tag_one text, + tag_two text, + int64_one integer, + int64_two integer +); +ALTER TABLE public.metric_one OWNER TO postgres; +CREATE TABLE public.metric_two ( + "timestamp" timestamp without time zone, + tag_three text, + string_one text +); +ALTER TABLE public.metric_two OWNER TO postgres; +COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; +2021-05-17 22:04:45 tag4 string2 +\. +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 +\. +COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; +2021-05-17 22:04:45 tag3 string1 +\. diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 27c6da1cd7571..721906250d699 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,14 +1,15 @@ # SQL Server Input Plugin -The `sqlserver` plugin provides metrics for your SQL Server instance. It -currently works with SQL Server 2008 SP3 and newer. 
Recorded metrics are +The `sqlserver` plugin provides metrics for your SQL Server instance. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. ### The SQL Server plugin supports the following editions/versions of SQL Server - SQL Server - - 2008 SP3 (with CU3) - - SQL Server 2008 R2 SP3 and newer versions + - 2012 or newer (Plugin support aligned with the [official Microsoft SQL Server support](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)) + - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will + need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance +- Azure SQL Elastic Pool ### Additional Setup: @@ -48,27 +49,69 @@ GO ## See https://github.com/denisenkom/go-mssqldb for detailed connection ## parameters, in particular, tls connections can be created like so: ## "encrypt=true;certificate=;hostNameInCertificate=" - # servers = [ - # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", - # ] - - ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 - ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. - ## Possible values for database_type are - ## "AzureSQLDB" - ## "SQLServer" - ## "AzureSQLManagedInstance" + servers = [ + "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", + ] + + ## Authentication method + ## valid methods: "connection_string", "AAD" + # auth_method = "connection_string" + + ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 + ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. + ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" + + ## Queries enabled by default for database_type = "AzureSQLDB" are - + ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, + ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + # database_type = "AzureSQLDB" + ## A list of queries to include. If not specified, all the above listed queries are used. + # include_query = [] + + ## A list of queries to explicitly ignore. 
+ # exclude_query = [] + + ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - + ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, + ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers + + # database_type = "AzureSQLManagedInstance" + + # include_query = [] + + # exclude_query = [] + + ## Queries enabled by default for database_type = "SQLServer" are - + ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, + ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + + database_type = "SQLServer" + + include_query = [] + + ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default + exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + + ## The following are old config settings. You may use them only if you are using the earlier flavor of queries; however, it is recommended to use + ## the new mechanism of identifying the database_type and thereby using its corresponding queries + + ## Optional parameter, setting this to 2 will use a new version - ## of the collection queries that break compatibility with the original dashboards. - ## Version 2 - is compatible from SQL Server 2008 Sp3 and later versions and also for SQL Azure DB - ## Version 2 is in the process of being deprecated, please consider using database_type. + ## of the collection queries that break compatibility with the original + ## dashboards. + ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB # query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false + ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". + ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". + ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. + ## This setting/metric is optional and is disabled by default. + # health_metric = false + ## Possible queries across different versions of the collectors ## Queries enabled by default for specific Database Type @@ -105,6 +148,9 @@ GO ## - SQLServerRequests ## - SQLServerVolumeSpace ## - SQLServerCpu + ## and the following as optional (if mentioned in the include_query list) + ## - SQLServerAvailabilityReplicaStates + ## - SQLServerDatabaseReplicaStates ## Version 2 by default collects the following queries ## Version 2 is being deprecated, please consider using database_type. @@ -131,17 +177,39 @@ GO ## - VolumeSpace ## - PerformanceMetrics +``` +### Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) - ## A list of queries to include. If not specified, all the above listed queries are used. - # include_query = [] +Azure SQL Database supports two main methods of authentication: [SQL authentication and AAD authentication](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). The recommended practice is to [use AAD authentication when possible](https://docs.microsoft.com/en-us/azure/azure-sql/database/authentication-aad-overview).
-  ## A list of queries to explicitly ignore.
-  exclude_query = [ 'Schedulers' , 'SqlRequests' ]

+AAD is a more modern authentication protocol that allows for easier credential/role management, and it can eliminate the need to include passwords in a connection string.
+To enable support for AAD authentication, we leverage the existing AAD authentication support in the [SQL Server driver for Go](https://github.com/denisenkom/go-mssqldb#azure-active-directory-authentication---preview).

+#### How to use AAD Auth with MSI
+- Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that will connect to the SQL Server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
+- On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the script below. This might require allow-listing the client machine's IP address (from where the SQL script below is being run) on the SQL Server resource.
+```sql
+EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = ''<monitoring_vm_name>'')
+    BEGIN
+        DROP USER [<monitoring_vm_name>]
+    END')
+EXECUTE ('CREATE USER [<monitoring_vm_name>] FROM EXTERNAL PROVIDER')
+EXECUTE ('GRANT VIEW DATABASE STATE TO [<monitoring_vm_name>]')
+```
+- On the SQL Server resource of the database(s) being monitored, go to the "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address.
+- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity is used for authentication). The auth method must be set to "AAD":
+```toml
+  servers = [
+    "Server=<your server>.database.windows.net;Port=1433;Database=<your database>;app name=telegraf;log=1;",
+  ]
+  auth_method = "AAD"
+```
+- Please note AAD-based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication).

### Metrics:
To provide backwards compatibility, this plugin supports two versions of metrics queries.

@@ -176,7 +244,7 @@ The new (version 2) metrics provide:
- *Memory*: PLE, Page reads/sec, Page writes/sec, + more
- *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more
- *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more
-- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc.
+- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version.
In the case of Azure SQL, relevant properties such as Tier, #Vcores, Memory, etc. are also included.
- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
- *Schedulers* - This captures `sys.dm_os_schedulers`.
- *SqlRequests* - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and
@@ -205,7 +273,7 @@ These are metrics for Azure SQL Database (single database) and are very similar
- AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
- AzureSQLDBResourceGovernance: Relevant properties indicating resource limits from `sys.dm_user_db_resource_governance`.
- AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`, including cloud-specific counters for SQL Hyperscale.
-- AzureSQLDBServerProperties: Relevant Azure SQL relevent properties from such as Tier, #Vcores, Memory etc, storage, etc.
+- AzureSQLDBServerProperties: Relevant Azure SQL properties such as Tier, #Vcores, Memory, storage, etc.
- AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of a statement, and for a specific database only.
- AzureSQLDBOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance-wide.
- AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`.
@@ -218,7 +286,7 @@ These are metrics for Azure SQL Managed instance, are very similar to version 2
- AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
- AzureSQLMIResourceGovernance: Relevant properties indicating resource limits from `sys.dm_instance_resource_governance`.
- AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`, including cloud-specific counters for SQL Hyperscale.
-- AzureSQLMIServerProperties: Relevant Azure SQL relevent properties such as Tier, #Vcores, Memory etc, storage, etc.
+- AzureSQLMIServerProperties: Relevant Azure SQL properties such as Tier, #Vcores, Memory, storage, etc.
- AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance-wide.
- AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`.
- AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots.
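+
+Since the plugin section is repeated once per `database_type`, monitoring a managed instance alongside a regular SQL Server can be expressed with two sections. The following is a minimal sketch; the connection strings are placeholders and the trimmed `include_query` list is only illustrative:
+```toml
+[[inputs.sqlserver]]
+  servers = ["Server=<mi host>;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;"]
+  database_type = "AzureSQLManagedInstance"
+  ## Collect only a subset of the default MI queries.
+  include_query = ["AzureSQLMIResourceStats", "AzureSQLMIDatabaseIO"]
+
+[[inputs.sqlserver]]
+  servers = ["Server=<sql host>;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;"]
+  database_type = "SQLServer"
+```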
@@ -233,13 +301,15 @@ These are metrics for Azure SQL Managed instance, are very similar to version 2
- *Memory*: PLE, Page reads/sec, Page writes/sec, + more
- *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more
- *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more
-- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc.
+- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL, relevant properties such as Tier, #Vcores, Memory, etc. are also included.
- SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
- SQLServerSchedulers - This captures `sys.dm_os_schedulers`.
- SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and blocking sessions.
- SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used, and occupied space on every disk that contains a data or log file. (Note that even if enabled, it won't get any data from Azure SQL Database or SQL Managed Instance.) It is pointless to run this with high frequency (i.e. every 10s), but it won't cause any problems.
- SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data; the table is updated once per minute. (Note that even if enabled, it won't get any data from Azure SQL Database or SQL Managed Instance.)
+- SQLServerAvailabilityReplicaStates: Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup.
+- SQLServerDatabaseReplicaStates: Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup.

#### Output Measures
@@ -293,4 +363,20 @@ Version 2 queries have the following tags:
- `sql_instance`: Physical host and instance name (hostname:instance)
- `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database, as the server name is a logical construct.

+#### Health Metric
+All collection versions (version 1, version 2, and database_type) support an optional plugin health metric called `sqlserver_telegraf_health`. This metric tracks whether connections to SQL Server are succeeding or failing. Users can leverage this metric to detect if their SQL Server monitoring is not working as intended.
+
+In the configuration file, toggling `health_metric` to `true` will enable collection of this metric. By default, this value is set to `false` and the metric is not collected. The health metric emits one record for each connection specified by `servers` in the configuration file.
+
+The health metric emits the following tags:
+- `sql_instance` - Name of the server specified in the connection string. This value is emitted exactly as it appears in the connection string.
If the server could not be parsed from the connection string, a constant placeholder value is emitted.
+- `database_name` - Name of the database (or initial catalog) specified in the connection string. This value is emitted exactly as it appears in the connection string. If the database could not be parsed from the connection string, a constant placeholder value is emitted.
+
+The health metric emits the following fields:
+- `attempted_queries` - Number of queries that were attempted for this connection
+- `successful_queries` - Number of queries that completed successfully for this connection
+- `database_type` - Type of database as specified by `database_type`. If `database_type` is empty, the `QueryVersion` and `AzureDB` fields are concatenated instead
+
+If `attempted_queries` and `successful_queries` are not equal for a given connection, some metrics were not successfully gathered for that connection. If `successful_queries` is 0, no metrics were successfully gathered.
+
[cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality
diff --git a/plugins/inputs/sqlserver/azuresqldbqueries_test.go b/plugins/inputs/sqlserver/azuresqldbqueries_test.go
new file mode 100644
index 0000000000000..6d5712f39509a
--- /dev/null
+++ b/plugins/inputs/sqlserver/azuresqldbqueries_test.go
@@ -0,0 +1,450 @@
+package sqlserver
+
+import (
+	"os"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAzureSQL_Database_ResourceStats_Query(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" {
+		t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING")
+	}
+
+	connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING")
+
+	server := &SQLServer{
+		Servers:      []string{connectionString},
+		IncludeQuery: []string{"AzureSQLDBResourceStats"},
+		AuthMethod:   "connection_string",
+		DatabaseType: "AzureSQLDB",
+	}
+
+	var acc testutil.Accumulator
+
+	require.NoError(t, server.Start(&acc))
+	require.NoError(t, server.Gather(&acc))
+
+	require.True(t, acc.HasMeasurement("sqlserver_azure_db_resource_stats"))
+	require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance"))
+	require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "database_name"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_data_io_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_log_write_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_memory_usage_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "xtp_storage_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_worker_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_session_percent"))
+	require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "dtu_limit"))              // Can be null.
+	require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "avg_login_rate_percent")) // Can be null.
+	require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "end_time"))               // Time field.
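+	// Note: nullable columns (and the time-typed end_time) are asserted above with
+	// the generic HasField check rather than a type-specific helper, since their
+	// recorded Go type cannot be relied upon when the value is NULL.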
+ require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_memory_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Database_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_db_resource_governance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "database_name")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_min_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_min_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", 
"volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_pfs_iops")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_WaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azuredb_waitstats")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) 
+ require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "current_size_mb")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "space_used_mb")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_server_properties", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
+ require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Database_OsWaitstats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "database_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, 
acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "database_name")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasField("sqlserver_requests", "wait_type")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasField("sqlserver_requests", "nt_user_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. 
+ require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go new file mode 100644 index 0000000000000..72a74174a8722 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go @@ -0,0 +1,378 @@ +package sqlserver + +import ( + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestAzureSQL_Managed_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, 
acc.HasMeasurement("sqlserver_azure_db_resource_stats")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Managed_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_instance_resource_governance")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "tempdb_log_file_number")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_man_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_ext_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "vol_ext_xtore_iops")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + 
require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
+ require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version_desc")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_online")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_restoring")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recovering")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recoveryPending")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_suspect")) + require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Managed_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := 
os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_type")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasTag("sqlserver_requests", "nt_user_name")) + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. 
+ require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMISchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 04a76cc983cb2..17361c20d41f8 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -31,6 +31,7 @@ SELECT TOP(1) ,[end_time] ,cast([avg_instance_memory_percent] as float) as [avg_instance_memory_percent] ,cast([avg_instance_cpu_percent] as float) as [avg_instance_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_db_resource_stats WITH (NOLOCK) ORDER BY @@ -80,6 +81,7 @@ SELECT ,[volume_type_external_xstore_iops] ,[volume_pfs_iops] ,[volume_type_pfs_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_user_db_resource_governance WITH (NOLOCK); ` @@ -96,13 +98,14 @@ END SELECT 'sqlserver_azuredb_waitstats' AS [measurement] ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name'] 
+ ,DB_NAME() as [database_name] ,dbws.[wait_type] ,dbws.[wait_time_ms] ,dbws.[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] ,dbws.[signal_wait_time_ms] ,dbws.[max_wait_time_ms] ,dbws.[waiting_tasks_count] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_db_wait_stats AS dbws WITH (NOLOCK) WHERE @@ -180,6 +183,7 @@ SELECT END AS [file_type] ,ISNULL([size],0)/128 AS [current_size_mb] ,ISNULL(FILEPROPERTY(b.[logical_filename],'SpaceUsed')/128,0) as [space_used_mb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -- needed to get Tempdb file names on Azure SQL DB so you can join appropriately. Without this had a bug where join was only on file_id @@ -237,6 +241,7 @@ SELECT ) END AS [available_storage_mb] ,(select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as [uptime] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.[databases] AS d -- sys.databases.database_id may not match current DB_ID on Azure SQL DB CROSS JOIN sys.[database_service_objectives] AS slo @@ -320,6 +325,7 @@ SELECT 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' ELSE 'Other' END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) WHERE ws.[wait_type] NOT IN ( @@ -374,6 +380,7 @@ SELECT ,DB_NAME() AS [database_name] ,mc.[type] AS [clerk_type] ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) GROUP BY mc.[type] @@ -431,101 +438,114 @@ WITH PerfCounters AS ( ELSE d.[physical_database_name] END WHERE - counter_name IN ( - ''SQL Compilations/sec'' - ,''SQL Re-Compilations/sec'' - ,''User Connections'' - ,''Batch Requests/sec'' - ,''Logouts/sec'' - ,''Logins/sec'' - ,''Processes blocked'' - ,''Latch Waits/sec'' - ,''Full Scans/sec'' - ,''Index Searches/sec'' - ,''Page Splits/sec'' - ,''Page lookups/sec'' - ,''Page reads/sec'' - ,''Page writes/sec'' - ,''Readahead pages/sec'' - ,''Lazy writes/sec'' - ,''Checkpoint pages/sec'' - ,''Page life expectancy'' - ,''Log File(s) Size (KB)'' - ,''Log File(s) Used Size (KB)'' - ,''Data File(s) Size (KB)'' - ,''Transactions/sec'' - ,''Write Transactions/sec'' - ,''Active Temp Tables'' - ,''Temp Tables Creation Rate'' - ,''Temp Tables For Destruction'' - ,''Free Space in tempdb (KB)'' - ,''Version Store Size (KB)'' - ,''Memory Grants Pending'' - ,''Memory Grants Outstanding'' - ,''Free list stalls/sec'' - ,''Buffer cache hit ratio'' - ,''Buffer cache hit ratio base'' - ,''Backup/Restore Throughput/sec'' - ,''Total Server Memory (KB)'' - ,''Target Server Memory (KB)'' - ,''Log Flushes/sec'' - ,''Log Flush Wait Time'' - ,''Memory broker clerk size'' - ,''Log Bytes Flushed/sec'' - ,''Bytes Sent to Replica/sec'' - ,''Log Send Queue'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Replica/sec'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Transport/sec'' - ,''Bytes Received from Replica/sec'' - ,''Receives from Replica/sec'' - ,''Flow Control Time (ms/sec)'' - ,''Flow Control/sec'' - ,''Resent Messages/sec'' - ,''Redone Bytes/sec'' - ,''XTP Memory Used (KB)'' - ,''Transaction Delay'' - ,''Log Bytes Received/sec'' - ,''Log Apply Pending Queue'' - ,''Redone Bytes/sec'' - ,''Recovery Queue'' - ,''Log Apply Ready Queue'' - ,''CPU usage %'' - ,''CPU usage % base'' - ,''Queued requests'' 
- ,''Requests completed/sec'' - ,''Blocked tasks'' - ,''Active memory grant amount (KB)'' - ,''Disk Read Bytes/sec'' - ,''Disk Read IO Throttled/sec'' - ,''Disk Read IO/sec'' - ,''Disk Write Bytes/sec'' - ,''Disk Write IO Throttled/sec'' - ,''Disk Write IO/sec'' - ,''Used memory (KB)'' - ,''Forwarded Records/sec'' - ,''Background Writer pages/sec'' - ,''Percent Log Used'' - ,''Log Send Queue KB'' - ,''Redo Queue KB'' - ,''Mirrored Write Transactions/sec'' - ,''Group Commit Time'' - ,''Group Commits/Sec'' - ) OR ( - spi.[object_name] LIKE ''%User Settable%'' - OR spi.[object_name] LIKE ''%SQL Errors%'' - OR spi.[object_name] LIKE ''%Batch Resp Statistics%'' - ) OR ( - spi.[instance_name] IN (''_Total'') - AND spi.[counter_name] IN ( - ''Lock Timeouts/sec'' - ,''Lock Timeouts (timeout > 0)/sec'' - ,''Number of Deadlocks/sec'' - ,''Lock Waits/sec'' - ,''Latch Waits/sec'' + /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/ + NOT (spi.object_name LIKE 'MSSQL%:Databases%' AND spi.instance_name IN ('model','model_masterdb','model_userdb','msdb','mssqlsystemresource')) + AND + ( + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Query Store CPU usage' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp 
Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) ) ) +) INSERT INTO @PCounters select * from PerfCounters @@ -539,8 +559,9 @@ SELECT WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value], + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability from @PCounters pc LEFT OUTER JOIN @PCounters AS pc1 ON ( @@ -585,6 +606,7 @@ SELECT ,s.[program_name] ,s.[host_name] ,s.[nt_user_name] + ,s.[login_name] ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) WHEN 0 THEN '0-Read Committed' @@ -610,6 +632,7 @@ SELECT ,DB_NAME(qt.[dbid]) [stmt_db_name] ,CONVERT(varchar(20),[query_hash],1) as [query_hash] ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_exec_sessions AS s LEFT OUTER JOIN sys.dm_exec_requests AS r ON s.[session_id] = r.[session_id] @@ -652,6 +675,7 @@ SELECT ,s.[yield_count] ,s.[total_cpu_usage_ms] ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_schedulers AS s ` @@ -677,11 +701,13 @@ SELECT TOP 1 ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] ,SERVERPROPERTY('ProductVersion') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] ,[db_online] ,[db_restoring] ,[db_recovering] ,[db_recoveryPending] ,[db_suspect] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.server_resource_stats CROSS APPLY ( SELECT @@ -708,8 +734,11 @@ SELECT TOP(1) 'sqlserver_azure_db_resource_stats' AS [measurement] ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM - sys.server_resource_stats; + sys.server_resource_stats +ORDER BY + [end_time] DESC; ` const sqlAzureMIResourceGovernance string = ` @@ -733,6 +762,7 @@ SELECT ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_instance_resource_governance; ` @@ -758,6 +788,7 @@ SELECT ,vfs.[num_of_bytes_written] AS [write_bytes] ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) ON vfs.[database_id] = mf.[database_id] @@ -778,6 +809,7 @@ SELECT ,REPLACE(@@SERVERNAME, '\', ':') AS 
[sql_instance] ,mc.[type] AS [clerk_type] ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) GROUP BY mc.[type] @@ -860,6 +892,7 @@ SELECT 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' ELSE 'Other' END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) WHERE ws.[wait_type] NOT IN ( @@ -951,100 +984,110 @@ WITH PerfCounters AS ( END WHERE counter_name IN ( - ''SQL Compilations/sec'' - ,''SQL Re-Compilations/sec'' - ,''User Connections'' - ,''Batch Requests/sec'' - ,''Logouts/sec'' - ,''Logins/sec'' - ,''Processes blocked'' - ,''Latch Waits/sec'' - ,''Full Scans/sec'' - ,''Index Searches/sec'' - ,''Page Splits/sec'' - ,''Page lookups/sec'' - ,''Page reads/sec'' - ,''Page writes/sec'' - ,''Readahead pages/sec'' - ,''Lazy writes/sec'' - ,''Checkpoint pages/sec'' - ,''Page life expectancy'' - ,''Log File(s) Size (KB)'' - ,''Log File(s) Used Size (KB)'' - ,''Data File(s) Size (KB)'' - ,''Transactions/sec'' - ,''Write Transactions/sec'' - ,''Active Temp Tables'' - ,''Temp Tables Creation Rate'' - ,''Temp Tables For Destruction'' - ,''Free Space in tempdb (KB)'' - ,''Version Store Size (KB)'' - ,''Memory Grants Pending'' - ,''Memory Grants Outstanding'' - ,''Free list stalls/sec'' - ,''Buffer cache hit ratio'' - ,''Buffer cache hit ratio base'' - ,''Backup/Restore Throughput/sec'' - ,''Total Server Memory (KB)'' - ,''Target Server Memory (KB)'' - ,''Log Flushes/sec'' - ,''Log Flush Wait Time'' - ,''Memory broker clerk size'' - ,''Log Bytes Flushed/sec'' - ,''Bytes Sent to Replica/sec'' - ,''Log Send Queue'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Replica/sec'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Transport/sec'' - ,''Bytes Received from Replica/sec'' - ,''Receives from Replica/sec'' - ,''Flow Control Time (ms/sec)'' - ,''Flow Control/sec'' - ,''Resent Messages/sec'' - ,''Redone Bytes/sec'' - ,''XTP Memory Used (KB)'' - ,''Transaction Delay'' - ,''Log Bytes Received/sec'' - ,''Log Apply Pending Queue'' - ,''Redone Bytes/sec'' - ,''Recovery Queue'' - ,''Log Apply Ready Queue'' - ,''CPU usage %'' - ,''CPU usage % base'' - ,''Queued requests'' - ,''Requests completed/sec'' - ,''Blocked tasks'' - ,''Active memory grant amount (KB)'' - ,''Disk Read Bytes/sec'' - ,''Disk Read IO Throttled/sec'' - ,''Disk Read IO/sec'' - ,''Disk Write Bytes/sec'' - ,''Disk Write IO Throttled/sec'' - ,''Disk Write IO/sec'' - ,''Used memory (KB)'' - ,''Forwarded Records/sec'' - ,''Background Writer pages/sec'' - ,''Percent Log Used'' - ,''Log Send Queue KB'' - ,''Redo Queue KB'' - ,''Mirrored Write Transactions/sec'' - ,''Group Commit Time'' - ,''Group Commits/Sec'' + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + 
,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' ) OR ( - spi.[object_name] LIKE ''%User Settable%'' - OR spi.[object_name] LIKE ''%SQL Errors%'' - OR spi.[object_name] LIKE ''%Batch Resp Statistics%'' + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' ) OR ( - spi.[instance_name] IN (''_Total'') + spi.[instance_name] IN ('_Total') AND spi.[counter_name] IN ( - ''Lock Timeouts/sec'' - ,''Lock Timeouts (timeout > 0)/sec'' - ,''Number of Deadlocks/sec'' - ,''Lock Waits/sec'' - ,''Latch Waits/sec'' + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' ) ) +) INSERT INTO @PCounters select * from PerfCounters @@ -1057,8 +1100,9 @@ SELECT WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value], + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability from @PCounters pc LEFT OUTER JOIN @PCounters AS pc1 ON ( @@ -1088,8 +1132,7 @@ SELECT ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] ,DB_NAME() as [database_name] ,s.[session_id] - ,ISNULL(r.[request_id], 0) as [request_id] - ,DB_NAME(s.[database_id]) as [session_db_name] + ,ISNULL(r.[request_id], 0) as [request_id] ,COALESCE(r.[status], s.[status]) AS [status] ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] @@ -1103,6 +1146,7 @@ SELECT ,s.[program_name] ,s.[host_name] ,s.[nt_user_name] + ,s.[login_name] ,COALESCE(r.[open_transaction_count], 
s.[open_transaction_count]) AS [open_transaction] ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) WHEN 0 THEN '0-Read Committed' @@ -1129,6 +1173,7 @@ SELECT ,CONVERT(varchar(20),[query_hash],1) as [query_hash] ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_exec_sessions AS s LEFT OUTER JOIN sys.dm_exec_requests AS r ON s.[session_id] = r.[session_id] @@ -1171,5 +1216,6 @@ SELECT ,s.[yield_count] ,s.[total_cpu_usage_ms] ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_schedulers AS s ` diff --git a/plugins/inputs/sqlserver/connectionstring.go b/plugins/inputs/sqlserver/connectionstring.go new file mode 100644 index 0000000000000..b5f530b9f9510 --- /dev/null +++ b/plugins/inputs/sqlserver/connectionstring.go @@ -0,0 +1,100 @@ +package sqlserver + +import ( + "net/url" + "strings" +) + +const ( + emptySQLInstance = "" + emptyDatabaseName = "" +) + +// getConnectionIdentifiers returns the sqlInstance and databaseName from the given connection string. +// The name of the SQL instance is returned as-is in the connection string +// If the connection string could not be parsed or sqlInstance/databaseName were not present, a placeholder value is returned +func getConnectionIdentifiers(connectionString string) (sqlInstance string, databaseName string) { + if len(connectionString) == 0 { + return emptySQLInstance, emptyDatabaseName + } + + trimmedConnectionString := strings.TrimSpace(connectionString) + + if strings.HasPrefix(trimmedConnectionString, "odbc:") { + connectionStringWithoutOdbc := strings.TrimPrefix(trimmedConnectionString, "odbc:") + return parseConnectionStringKeyValue(connectionStringWithoutOdbc) + } + if strings.HasPrefix(trimmedConnectionString, "sqlserver://") { + return parseConnectionStringURL(trimmedConnectionString) + } + return parseConnectionStringKeyValue(trimmedConnectionString) +} + +// parseConnectionStringKeyValue parses a "key=value;" connection string and returns the SQL instance and database name +func parseConnectionStringKeyValue(connectionString string) (sqlInstance string, databaseName string) { + sqlInstance = "" + databaseName = "" + + keyValuePairs := strings.Split(connectionString, ";") + for _, keyValuePair := range keyValuePairs { + if len(keyValuePair) == 0 { + continue + } + + keyAndValue := strings.SplitN(keyValuePair, "=", 2) + key := strings.TrimSpace(strings.ToLower(keyAndValue[0])) + if len(key) == 0 { + continue + } + + value := "" + if len(keyAndValue) > 1 { + value = strings.TrimSpace(keyAndValue[1]) + } + if strings.EqualFold("server", key) { + sqlInstance = value + continue + } + if strings.EqualFold("database", key) { + databaseName = value + } + } + + if sqlInstance == "" { + sqlInstance = emptySQLInstance + } + if databaseName == "" { + databaseName = emptyDatabaseName + } + + return sqlInstance, databaseName +} + +// parseConnectionStringURL parses a URL-formatted connection string and returns the SQL instance and database name +func parseConnectionStringURL(connectionString string) (sqlInstance string, databaseName string) { + sqlInstance = emptySQLInstance + databaseName = emptyDatabaseName + + u, err := url.Parse(connectionString) + if err != nil { + return emptySQLInstance, emptyDatabaseName + } + + sqlInstance = u.Hostname() + + if len(u.Path) > 1 { + // 
There was a SQL instance name specified in addition to the host + // E.g. "the.host.com:1234/InstanceName" or "the.host.com/InstanceName" + sqlInstance = sqlInstance + "\\" + u.Path[1:] + } + + query := u.Query() + for key, value := range query { + if strings.EqualFold("database", key) { + databaseName = value[0] + break + } + } + + return sqlInstance, databaseName +} diff --git a/plugins/inputs/sqlserver/sqlqueriesV2.go b/plugins/inputs/sqlserver/sqlqueriesV2.go index 66b1bdf5976b5..3521cc9571661 100644 --- a/plugins/inputs/sqlserver/sqlqueriesV2.go +++ b/plugins/inputs/sqlserver/sqlqueriesV2.go @@ -1348,37 +1348,62 @@ IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050 END ` -const sqlServerCpuV2 string = ` +const sqlServerCPUV2 string = ` /*The ring buffer has a new value every minute*/ IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/ BEGIN -SELECT - 'sqlserver_cpu' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[SQLProcessUtilization] AS [sqlserver_process_cpu] - ,[SystemIdle] AS [system_idle_cpu] - ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] -FROM ( - SELECT TOP 1 - [record_id] - /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/ - ,[SQLProcessUtilization] - ,[SystemIdle] +;WITH utilization_cte AS +( + SELECT + [SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] FROM ( - SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] - ,[TIMESTAMP] + SELECT TOP 1 + [record_id] + ,[SQLProcessUtilization] + ,[SystemIdle] FROM ( - SELECT [TIMESTAMP] - ,convert(XML, [record]) AS [record] - FROM sys.dm_os_ring_buffers - WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' - AND [record] LIKE '%%' - ) AS x - ) AS y - ORDER BY record_id DESC -) as z + SELECT + record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT + [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE + [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%%' + ) AS x + ) AS y + ORDER BY [record_id] DESC + ) AS z +), +processor_Info_cte AS +( + SELECT (cpu_count / hyperthread_ratio) as number_of_physical_cpus +  FROM sys.dm_os_sys_info +) +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[sqlserver_process_cpu] + ,[system_idle_cpu] + ,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu] +FROM + ( + SELECT + (case + when [other_process_cpu] < 0 then [sqlserver_process_cpu] / a.number_of_physical_cpus + else [sqlserver_process_cpu] +  end) as [sqlserver_process_cpu] + ,[system_idle_cpu] + FROM utilization_cte + CROSS APPLY processor_Info_cte a + ) AS b END ` diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 2ed4df266598f..4a965bec15afd 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ 
b/plugins/inputs/sqlserver/sqlserver.go @@ -2,12 +2,15 @@ package sqlserver import ( "database/sql" + "errors" "fmt" "log" + "strings" "sync" "time" - _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization + "github.com/Azure/go-autorest/autorest/adal" + mssql "github.com/denisenkom/go-mssqldb" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" @@ -15,14 +18,18 @@ import ( // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - QueryVersion int `toml:"query_version"` - AzureDB bool `toml:"azuredb"` - DatabaseType string `toml:"database_type"` - IncludeQuery []string `toml:"include_query"` - ExcludeQuery []string `toml:"exclude_query"` - queries MapQuery - isInitialized bool + Servers []string `toml:"servers"` + AuthMethod string `toml:"auth_method"` + QueryVersion int `toml:"query_version"` + AzureDB bool `toml:"azuredb"` + DatabaseType string `toml:"database_type"` + IncludeQuery []string `toml:"include_query"` + ExcludeQuery []string `toml:"exclude_query"` + HealthMetric bool `toml:"health_metric"` + pools []*sql.DB + queries MapQuery + adalToken *adal.Token + muCacheLock sync.RWMutex } // Query struct @@ -36,8 +43,32 @@ type Query struct { // MapQuery type type MapQuery map[string]Query +// HealthMetric struct tracking the number of attempted vs successful connections for each connection string +type HealthMetric struct { + AttemptedQueries int + SuccessfulQueries int +} + const defaultServer = "Server=.;app name=telegraf;log=1;" +const ( + typeAzureSQLDB = "AzureSQLDB" + typeAzureSQLManagedInstance = "AzureSQLManagedInstance" + typeSQLServer = "SQLServer" +) + +const ( + healthMetricName = "sqlserver_telegraf_health" + healthMetricInstanceTag = "sql_instance" + healthMetricDatabaseTag = "database_name" + healthMetricAttemptedQueries = "attempted_queries" + healthMetricSuccessfulQueries = "successful_queries" + healthMetricDatabaseType = "database_type" +) + +// resource id for Azure SQL Database +const sqlAzureResourceID = "https://database.windows.net/" + const sampleConfig = ` ## Specify instances to monitor with a list of connection strings. ## All connection parameters are optional. @@ -46,62 +77,62 @@ const sampleConfig = ` ## See https://github.com/denisenkom/go-mssqldb for detailed connection ## parameters, in particular, tls connections can be created like so: ## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] - -## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -## Possible values for database_type are -## "AzureSQLDB" -## "SQLServer" -## "AzureSQLManagedInstance" +servers = [ + "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +] + +## Authentication method +## valid methods: "connection_string", "AAD" +# auth_method = "connection_string" + +## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. 
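## For illustration only (a minimal sketch, not part of this sample config; host
## names and credentials below are placeholders): repeating the plugin section per
## database_type in telegraf.conf can look like
##
##   [[inputs.sqlserver]]
##     servers = ["Server=onprem-sql-host;Port=1433;User Id=telegraf;Password=placeholder;app name=telegraf;log=1;"]
##     database_type = "SQLServer"
##
##   [[inputs.sqlserver]]
##     servers = ["Server=example.database.windows.net;Port=1433;User Id=telegraf;Password=placeholder;app name=telegraf;log=1;"]
##     database_type = "AzureSQLDB"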
+## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" + +## Queries enabled by default for database_type = "AzureSQLDB" are - +## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + # database_type = "AzureSQLDB" +## A list of queries to include. If not specified, all the above listed queries are used. +# include_query = [] + +## A list of queries to explicitly ignore. +# exclude_query = [] + +## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers + +# database_type = "AzureSQLManagedInstance" + +# include_query = [] + +# exclude_query = [] + +## Queries enabled by default for database_type = "SQLServer" are - +## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + +database_type = "SQLServer" + +include_query = [] + +## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + +## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +## the new mechanism of identifying the database_type there by use it's corresponding queries ## Optional parameter, setting this to 2 will use a new version ## of the collection queries that break compatibility with the original ## dashboards. ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -query_version = 2 +# query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false - -## Possible queries -## Version 2: -## - PerformanceCounters -## - WaitStatsCategorized -## - DatabaseIO -## - ServerProperties -## - MemoryClerk -## - Schedulers -## - SqlRequests -## - VolumeSpace -## - Cpu - -## Version 1: -## - PerformanceCounters -## - WaitStatsCategorized -## - CPUHistory -## - DatabaseIO -## - DatabaseSize -## - DatabaseStats -## - DatabaseProperties -## - MemoryClerk -## - VolumeSpace -## - PerformanceMetrics - - -## Queries enabled by default for specific Database Type -## database_type = AzureSQLDB - ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO - -## A list of queries to include. If not specified, all the above listed queries are used. -# include_query = [] - -## A list of queries to explicitly ignore. 
-exclude_query = [ 'Schedulers' , 'SqlRequests'] ` // SampleConfig return the sample configuration @@ -128,7 +159,7 @@ func initQueries(s *SQLServer) error { // Constant defintiions for type "AzureSQLDB" start with sqlAzureDB // Constant defintiions for type "AzureSQLManagedInstance" start with sqlAzureMI // Constant defintiions for type "SQLServer" start with sqlServer - if s.DatabaseType == "AzureSQLDB" { + if s.DatabaseType == typeAzureSQLDB { queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false} queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false} queries["AzureSQLDBWaitStats"] = Query{ScriptName: "AzureSQLDBWaitStats", Script: sqlAzureDBWaitStats, ResultByRow: false} @@ -139,7 +170,7 @@ func initQueries(s *SQLServer) error { queries["AzureSQLDBPerformanceCounters"] = Query{ScriptName: "AzureSQLDBPerformanceCounters", Script: sqlAzureDBPerformanceCounters, ResultByRow: false} queries["AzureSQLDBRequests"] = Query{ScriptName: "AzureSQLDBRequests", Script: sqlAzureDBRequests, ResultByRow: false} queries["AzureSQLDBSchedulers"] = Query{ScriptName: "AzureSQLDBSchedulers", Script: sqlAzureDBSchedulers, ResultByRow: false} - } else if s.DatabaseType == "AzureSQLManagedInstance" { + } else if s.DatabaseType == typeAzureSQLManagedInstance { queries["AzureSQLMIResourceStats"] = Query{ScriptName: "AzureSQLMIResourceStats", Script: sqlAzureMIResourceStats, ResultByRow: false} queries["AzureSQLMIResourceGovernance"] = Query{ScriptName: "AzureSQLMIResourceGovernance", Script: sqlAzureMIResourceGovernance, ResultByRow: false} queries["AzureSQLMIDatabaseIO"] = Query{ScriptName: "AzureSQLMIDatabaseIO", Script: sqlAzureMIDatabaseIO, ResultByRow: false} @@ -149,7 +180,7 @@ func initQueries(s *SQLServer) error { queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false} queries["AzureSQLMIRequests"] = Query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false} queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false} - } else if s.DatabaseType == "SQLServer" { //These are still V2 queries and have not been refactored yet. + } else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet. 
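// Sketch of the registration pattern used below (an editorial gloss, inferred from
// accRow later in this file, not original source commentary): each entry adds a
// named script to the MapQuery map; ResultByRow: false folds all columns of a
// result row into fields on one point, while ResultByRow: true (used only by the
// v1 PerformanceCounters query) emits one point per row with a single "value" field.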
queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false} queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false} queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false} @@ -158,7 +189,9 @@ func initQueries(s *SQLServer) error { queries["SQLServerSchedulers"] = Query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false} queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false} queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false} - queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCpu, ResultByRow: false} + queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCPU, ResultByRow: false} + queries["SQLServerAvailabilityReplicaStates"] = Query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false} + queries["SQLServerDatabaseReplicaStates"] = Query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false} } else { // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { @@ -176,7 +209,7 @@ func initQueries(s *SQLServer) error { queries["Schedulers"] = Query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false} queries["SqlRequests"] = Query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false} queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false} - queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCpuV2, ResultByRow: false} + queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false} } else { log.Println("W! 
DEPRECATED: query_version=1 has been deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true} @@ -203,8 +236,6 @@ func initQueries(s *SQLServer) error { } } - // Set a flag so we know that queries have already been initialized - s.isInitialized = true var querylist []string for query := range queries { querylist = append(querylist, query) @@ -216,47 +247,119 @@ func initQueries(s *SQLServer) error { // Gather collect data from SQL Server func (s *SQLServer) Gather(acc telegraf.Accumulator) error { - if !s.isInitialized { - if err := initQueries(s); err != nil { - acc.AddError(err) - return err - } - } - - if len(s.Servers) == 0 { - s.Servers = append(s.Servers, defaultServer) - } - var wg sync.WaitGroup + var mutex sync.Mutex + var healthMetrics = make(map[string]*HealthMetric) - for _, serv := range s.Servers { + for i, pool := range s.pools { for _, query := range s.queries { wg.Add(1) - go func(serv string, query Query) { + go func(pool *sql.DB, query Query, serverIndex int) { defer wg.Done() - acc.AddError(s.gatherServer(serv, query, acc)) - }(serv, query) + connectionString := s.Servers[serverIndex] + queryError := s.gatherServer(pool, query, acc, connectionString) + + if s.HealthMetric { + mutex.Lock() + s.gatherHealth(healthMetrics, connectionString, queryError) + mutex.Unlock() + } + + acc.AddError(queryError) + }(pool, query, i) } } wg.Wait() + + if s.HealthMetric { + s.accHealth(healthMetrics, acc) + } + return nil } -func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumulator) error { - // deferred opening - conn, err := sql.Open("mssql", server) - if err != nil { +// Start initialize a list of connection pools +func (s *SQLServer) Start(acc telegraf.Accumulator) error { + if err := initQueries(s); err != nil { + acc.AddError(err) return err } - defer conn.Close() + // initialize mutual exclusion lock + s.muCacheLock = sync.RWMutex{} + + for _, serv := range s.Servers { + var pool *sql.DB + + switch strings.ToLower(s.AuthMethod) { + case "connection_string": + // Use the DSN (connection string) directly. In this case, + // empty username/password causes use of Windows + // integrated authentication. + var err error + pool, err = sql.Open("mssql", serv) + + if err != nil { + acc.AddError(err) + continue + } + case "aad": + // AAD Auth with system-assigned managed identity (MSI) + + // AAD Auth is only supported for Azure SQL Database or Azure SQL Managed Instance + if s.DatabaseType == "SQLServer" { + err := errors.New("database connection failed : AAD auth is not supported for SQL VM i.e. 
DatabaseType=SQLServer") + acc.AddError(err) + continue + } + + // get token from in-memory cache variable or from Azure Active Directory + tokenProvider, err := s.getTokenProvider() + if err != nil { + acc.AddError(fmt.Errorf("error creating AAD token provider for system assigned Azure managed identity : %s", err.Error())) + continue + } + + connector, err := mssql.NewAccessTokenConnector(serv, tokenProvider) + if err != nil { + acc.AddError(fmt.Errorf("error creating the SQL connector : %s", err.Error())) + continue + } + + pool = sql.OpenDB(connector) + default: + return fmt.Errorf("unknown auth method: %v", s.AuthMethod) + } + + s.pools = append(s.pools, pool) + } + + return nil +} + +// Stop cleanup server connection pools +func (s *SQLServer) Stop() { + for _, pool := range s.pools { + _ = pool.Close() + } +} + +func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumulator, connectionString string) error { // execute query - rows, err := conn.Query(query.Script) + rows, err := pool.Query(query.Script) if err != nil { - return fmt.Errorf("Script %s failed: %w", query.ScriptName, err) - //return err + serverName, databaseName := getConnectionIdentifiers(connectionString) + + // Error msg based on the format in SSMS. SQLErrorClass() is another term for severity/level: http://msdn.microsoft.com/en-us/library/dd304156.aspx + if sqlerr, ok := err.(mssql.Error); ok { + return fmt.Errorf("Query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName, + serverName, databaseName, sqlerr.SQLErrorNumber(), sqlerr.SQLErrorClass(), sqlerr.SQLErrorState(), sqlerr.SQLErrorLineNo(), err) + } + + return fmt.Errorf("Query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) } + defer rows.Close() // grab the column information from the result @@ -307,6 +410,10 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e } } + if s.DatabaseType != "" { + tags["measurement_db_type"] = s.DatabaseType + } + if query.ResultByRow { // add measurement to Accumulator acc.AddFields(measurement, @@ -325,8 +432,147 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e return nil } +// gatherHealth stores info about any query errors in the healthMetrics map +func (s *SQLServer) gatherHealth(healthMetrics map[string]*HealthMetric, serv string, queryError error) { + if healthMetrics[serv] == nil { + healthMetrics[serv] = &HealthMetric{} + } + + healthMetrics[serv].AttemptedQueries++ + if queryError == nil { + healthMetrics[serv].SuccessfulQueries++ + } +} + +// accHealth accumulates the query health data contained within the healthMetrics map +func (s *SQLServer) accHealth(healthMetrics map[string]*HealthMetric, acc telegraf.Accumulator) { + for connectionString, connectionStats := range healthMetrics { + sqlInstance, databaseName := getConnectionIdentifiers(connectionString) + tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: databaseName} + fields := map[string]interface{}{ + healthMetricAttemptedQueries: connectionStats.AttemptedQueries, + healthMetricSuccessfulQueries: connectionStats.SuccessfulQueries, + healthMetricDatabaseType: s.getDatabaseTypeToLog(), + } + + acc.AddFields(healthMetricName, fields, tags, time.Now()) + } +} + +// getDatabaseTypeToLog returns the type of database monitored by this plugin instance +func (s *SQLServer) getDatabaseTypeToLog() string { + if s.DatabaseType 
== typeAzureSQLDB || s.DatabaseType == typeAzureSQLManagedInstance || s.DatabaseType == typeSQLServer { + return s.DatabaseType + } + + logname := fmt.Sprintf("QueryVersion-%d", s.QueryVersion) + if s.AzureDB { + logname += "-AzureDB" + } + return logname +} + +func (s *SQLServer) Init() error { + if len(s.Servers) == 0 { + log.Println("W! Warning: Server list is empty.") + } + + return nil +} + +// Get Token Provider by loading cached token or refreshed token +func (s *SQLServer) getTokenProvider() (func() (string, error), error) { + var tokenString string + + // load token + s.muCacheLock.RLock() + token, err := s.loadToken() + s.muCacheLock.RUnlock() + + // if there's error while loading token or found an expired token, refresh token and save it + if err != nil || token.IsExpired() { + // refresh token within a write-lock + s.muCacheLock.Lock() + defer s.muCacheLock.Unlock() + + // load token again, in case it's been refreshed by another thread + token, err = s.loadToken() + + // check loaded token's error/validity, then refresh/save token + if err != nil || token.IsExpired() { + // get new token + spt, err := s.refreshToken() + if err != nil { + return nil, err + } + + // use the refreshed token + tokenString = spt.OAuthToken() + } else { + // use locally cached token + tokenString = token.OAuthToken() + } + } else { + // use locally cached token + tokenString = token.OAuthToken() + } + + // return acquired token + return func() (string, error) { + return tokenString, nil + }, nil +} + +// Load token from in-mem cache +func (s *SQLServer) loadToken() (*adal.Token, error) { + // This method currently does a simplistic task of reading a from variable (in-mem cache), + // however it's been structured here to allow extending the cache mechanism to a different approach in future + + if s.adalToken == nil { + return nil, fmt.Errorf("token is nil or failed to load existing token") + } + + return s.adalToken, nil +} + +// Refresh token for the resource, and save to in-mem cache +func (s *SQLServer) refreshToken() (*adal.Token, error) { + // get MSI endpoint to get a token + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + // get new token for the resource id + spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, sqlAzureResourceID) + if err != nil { + return nil, err + } + + // ensure token is fresh + if err := spt.EnsureFresh(); err != nil { + return nil, err + } + + // save token to local in-mem cache + s.adalToken = &adal.Token{ + AccessToken: spt.Token().AccessToken, + RefreshToken: spt.Token().RefreshToken, + ExpiresIn: spt.Token().ExpiresIn, + ExpiresOn: spt.Token().ExpiresOn, + NotBefore: spt.Token().NotBefore, + Resource: spt.Token().Resource, + Type: spt.Token().Type, + } + + return s.adalToken, nil +} + func init() { inputs.Add("sqlserver", func() telegraf.Input { - return &SQLServer{} + return &SQLServer{ + Servers: []string{defaultServer}, + AuthMethod: "connection_string", + } }) } diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 8f5d355ef4df3..a9a022bd23fa7 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -6,10 +6,9 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { @@ -34,16 +33,15 @@ func 
TestSqlServer_QueriesInclusionExclusion(t *testing.T) { IncludeQuery: test["IncludeQuery"].([]string), ExcludeQuery: test["ExcludeQuery"].([]string), } - initQueries(&s) - assert.Equal(t, len(s.queries), test["queriesTotal"].(int)) + require.NoError(t, initQueries(&s)) + require.Equal(t, len(s.queries), test["queriesTotal"].(int)) for _, query := range test["queries"].([]string) { - assert.Contains(t, s.queries, query) + require.Contains(t, s.queries, query) } } } func TestSqlServer_ParseMetrics(t *testing.T) { - var acc testutil.Accumulator queries := make(MapQuery) @@ -63,7 +61,6 @@ func TestSqlServer_ParseMetrics(t *testing.T) { var fields = make(map[string]interface{}) for _, query := range queries { - mock = strings.Split(query.Script, "\n") idx := 0 @@ -78,7 +75,6 @@ func TestSqlServer_ParseMetrics(t *testing.T) { tags[headers[2]] = row[2] // tag 'type' if query.ResultByRow { - // set value by converting to float64 value, err := strconv.ParseFloat(row[3], 64) // require @@ -90,11 +86,9 @@ func TestSqlServer_ParseMetrics(t *testing.T) { tags, time.Now()) // assert acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, tags) - } else { // set fields for i := 3; i < len(row); i++ { - // set value by converting to float64 value, err := strconv.ParseFloat(row[i], 64) // require @@ -113,12 +107,11 @@ func TestSqlServer_ParseMetrics(t *testing.T) { } } -func TestSqlServer_MultipleInstance(t *testing.T) { +func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { // Invoke Gather() from two separate configurations and // confirm they don't interfere with each other - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" s := &SQLServer{ Servers: []string{testServer}, @@ -130,45 +123,292 @@ func TestSqlServer_MultipleInstance(t *testing.T) { } var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) err := s.Gather(&acc) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) + require.NoError(t, s2.Start(&acc2)) err = s2.Gather(&acc2) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) // acc includes size metrics, and excludes memory metrics - assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) - assert.True(t, acc.HasMeasurement("Log size (bytes)")) + require.False(t, acc.HasMeasurement("Memory breakdown (%)")) + require.True(t, acc.HasMeasurement("Log size (bytes)")) // acc2 includes memory metrics, and excludes size metrics - assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) - assert.False(t, acc2.HasMeasurement("Log size (bytes)")) + require.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + require.False(t, acc2.HasMeasurement("Log size (bytes)")) } -func TestSqlServer_MultipleInit(t *testing.T) { +func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other. + // This test is intentionally similar to TestSqlServer_MultipleInstanceIntegration. 
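// A sketch of the point this health metric produces, assuming a hypothetical
// server configured as "Server=myhost;Database=master;..." and monitored with
// database_type = "SQLServer" (shape inferred from accHealth, not captured output):
//
//   sqlserver_telegraf_health,sql_instance=myhost,database_name=master attempted_queries=9i,successful_queries=9i,database_type="SQLServer"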
+ // It is separated to ensure that the health metric code does not affect other metrics + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + s := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"MemoryClerk"}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"DatabaseSize"}, + HealthMetric: true, + } + + var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) + err := s.Gather(&acc) + require.NoError(t, err) + require.NoError(t, s2.Start(&acc)) + err = s2.Gather(&acc2) + require.NoError(t, err) + + // acc includes size metrics, and excludes memory metrics and the health metric + require.False(t, acc.HasMeasurement(healthMetricName)) + require.False(t, acc.HasMeasurement("Memory breakdown (%)")) + require.True(t, acc.HasMeasurement("Log size (bytes)")) + + // acc2 includes memory metrics and the health metric, and excludes size metrics + require.True(t, acc2.HasMeasurement(healthMetricName)) + require.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + require.False(t, acc2.HasMeasurement("Log size (bytes)")) + + sqlInstance, database := getConnectionIdentifiers(testServer) + tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: database} + require.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricAttemptedQueries, 9)) + require.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricSuccessfulQueries, 9)) +} + +func TestSqlServer_HealthMetric(t *testing.T) { + fakeServer1 := "localhost\\fakeinstance1;Database=fakedb1;Password=ABCabc01;" + fakeServer2 := "localhost\\fakeinstance2;Database=fakedb2;Password=ABCabc01;" + + s1 := &SQLServer{ + Servers: []string{fakeServer1, fakeServer2}, + IncludeQuery: []string{"DatabaseSize", "MemoryClerk"}, + HealthMetric: true, + AuthMethod: "connection_string", + } + + s2 := &SQLServer{ + Servers: []string{fakeServer1}, + IncludeQuery: []string{"DatabaseSize"}, + AuthMethod: "connection_string", + } + + // acc1 should have the health metric because it is specified in the config + var acc1 testutil.Accumulator + require.NoError(t, s1.Start(&acc1)) + require.NoError(t, s1.Gather(&acc1)) + require.True(t, acc1.HasMeasurement(healthMetricName)) + + // There will be 2 attempted queries (because we specified 2 queries in IncludeQuery) + // Both queries should fail because the specified SQL instances do not exist + sqlInstance1, database1 := getConnectionIdentifiers(fakeServer1) + tags1 := map[string]string{healthMetricInstanceTag: sqlInstance1, healthMetricDatabaseTag: database1} + require.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricAttemptedQueries, 2)) + require.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricSuccessfulQueries, 0)) + + sqlInstance2, database2 := getConnectionIdentifiers(fakeServer2) + tags2 := map[string]string{healthMetricInstanceTag: sqlInstance2, healthMetricDatabaseTag: database2} + require.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricAttemptedQueries, 2)) + require.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricSuccessfulQueries, 0)) + + // acc2 should not have the health metric because it is not specified in the config + var acc2 testutil.Accumulator + require.NoError(t, s2.Gather(&acc2)) + require.False(t, acc2.HasMeasurement(healthMetricName)) +} + +func TestSqlServer_MultipleInit(t *testing.T) { s := &SQLServer{} s2 := &SQLServer{ 
ExcludeQuery: []string{"DatabaseSize"}, } - initQueries(s) + require.NoError(t, initQueries(s)) _, ok := s.queries["DatabaseSize"] - // acc includes size metrics - assert.True(t, ok) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) + require.True(t, ok) - initQueries(s2) + require.NoError(t, initQueries(s2)) _, ok = s2.queries["DatabaseSize"] - // acc2 excludes size metrics - assert.False(t, ok) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) + require.False(t, ok) + s.Stop() + s2.Stop() +} + +func TestSqlServer_ConnectionString(t *testing.T) { + // URL format + connectionString := "sqlserver://username:password@hostname.database.windows.net?database=databasename&connection+timeout=30" + sqlInstance, database := getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "databasename", database) + + connectionString = " sqlserver://hostname2.somethingelse.net:1433?database=databasename2" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname2.somethingelse.net", sqlInstance) + require.Equal(t, "databasename2", database) + + connectionString = "sqlserver://hostname3:1433/SqlInstanceName3?database=databasename3" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname3\\SqlInstanceName3", sqlInstance) + require.Equal(t, "databasename3", database) + + connectionString = " sqlserver://hostname4/SqlInstanceName4?database=databasename4&connection%20timeout=30" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname4\\SqlInstanceName4", sqlInstance) + require.Equal(t, "databasename4", database) + + connectionString = " sqlserver://username:password@hostname5?connection%20timeout=30" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname5", sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + // odbc format + connectionString = "odbc:server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "master", database) + + connectionString = " odbc:server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb " + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "192.168.0.1", sqlInstance) + require.Equal(t, "mydb", database) + + connectionString = " odbc:Server=servername\\instancename;Database=dbname;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "servername\\instancename", sqlInstance) + require.Equal(t, "dbname", database) + + connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname2.database.windows.net", sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + connectionString = "invalid connection string" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, emptySQLInstance, sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + // Key/value format + connectionString = " server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated 
Security=true" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "master", database) + + connectionString = " server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "192.168.0.1", sqlInstance) + require.Equal(t, "mydb", database) + + connectionString = "Server=servername\\instancename;Database=dbname; " + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "servername\\instancename", sqlInstance) + require.Equal(t, "dbname", database) + + connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true " + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname2.database.windows.net", sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + connectionString = "invalid connection string" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, emptySQLInstance, sqlInstance) + require.Equal(t, emptyDatabaseName, database) +} + +func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { + // This test case checks where Availability Group (AG / HADR) queries return an output when included for processing for DatabaseType = SQLServer + // And they should not be processed when DatabaseType = AzureSQLDB + + // Please change the connection string to connect to relevant database when executing the test case + + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + + testServer := "Server=127.0.0.1;Port=1433;Database=testdb1;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + + s := &SQLServer{ + Servers: []string{testServer}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + DatabaseType: "AzureSQLDB", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + + var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) + err := s.Gather(&acc) + require.NoError(t, err) + + err = s2.Gather(&acc2) + require.NoError(t, s2.Start(&acc)) + require.NoError(t, err) + + // acc includes size metrics, and excludes memory metrics + require.True(t, acc.HasMeasurement("sqlserver_hadr_replica_states")) + require.True(t, acc.HasMeasurement("sqlserver_hadr_dbreplica_states")) + + // acc2 includes memory metrics, and excludes size metrics + require.False(t, acc2.HasMeasurement("sqlserver_hadr_replica_states")) + require.False(t, acc2.HasMeasurement("sqlserver_hadr_dbreplica_states")) + s.Stop() + s2.Stop() +} + +func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { + // This test case checks where Availability Group (AG / HADR) queries return specific fields supported by corresponding SQL Server version database being connected to. 
+ + // Please change the connection strings to connect to relevant database when executing the test case + + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + + testServer2019 := "Server=127.0.0.10;Port=1433;Database=testdb2019;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + testServer2012 := "Server=127.0.0.20;Port=1433;Database=testdb2012;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + + s2019 := &SQLServer{ + Servers: []string{testServer2019}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + s2012 := &SQLServer{ + Servers: []string{testServer2012}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + + var acc2019, acc2012 testutil.Accumulator + require.NoError(t, s2019.Start(&acc2019)) + err := s2019.Gather(&acc2019) + require.NoError(t, err) + + err = s2012.Gather(&acc2012) + require.NoError(t, s2012.Start(&acc2012)) + require.NoError(t, err) + + // acc2019 includes new HADR query fields + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "basic_features")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "is_distributed")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + require.True(t, acc2019.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + require.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + require.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + + // acc2012 does not include new HADR query fields + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "basic_features")) + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "is_distributed")) + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + require.False(t, acc2012.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + require.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + require.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + s2019.Stop() + s2012.Stop() } const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index d413986037c02..49bde3fb915a2 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -9,11 +9,11 @@ import ( // Variable @MajorMinorVersion: // - 1000 --> SQL Server 2008 // - 1050 --> SQL Server 2008 R2 -// - 1011 --> SQL Server 2012 -// - 1012 --> SQL Server 2014 -// - 1013 --> SQL Server 2016 -// - 1014 --> SQL Server 2017 -// - 1015 --> SQL Server 2019 +// - 1100 --> SQL Server 2012 +// - 1200 --> SQL Server 2014 +// - 1300 --> SQL Server 2016 +// - 1400 --> SQL Server 2017 +// - 1500 --> SQL Server 2019 // Thanks Bob Ward (http://aka.ms/bobwardms) // and the folks at Stack Overflow 
(https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs) @@ -216,6 +216,7 @@ SELECT ,CAST(SERVERPROPERTY(''EngineEdition'') AS int) AS [engine_edition] ,DATEDIFF(MINUTE,si.[sqlserver_start_time],GETDATE()) AS [uptime] ,SERVERPROPERTY(''ProductVersion'') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX('' - '',@@VERSION)) AS [sql_version_desc] ,dbs.[db_online] ,dbs.[db_restoring] ,dbs.[db_recovering] @@ -281,6 +282,17 @@ FROM sys.dm_os_schedulers AS s' EXEC sp_executesql @SqlStatement ` +/* +This string defines a SQL statements to retrieve Performance Counters as documented here - + SQL Server Performance Objects - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects?view=sql-server-ver15#SQLServerPOs +Some of the specific objects used are - + MSSQL$*:Access Methods - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object?view=sql-server-ver15 + MSSQL$*:Buffer Manager - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object?view=sql-server-ver15 + MSSQL$*:Databases - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-ver15 + MSSQL$*:General Statistics - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object?view=sql-server-ver15 + MSSQL$*:Exec Statistics - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-execstatistics-object?view=sql-server-ver15 + SQLServer:Query Store - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-query-store-object?view=sql-server-ver15 +*/ const sqlServerPerformanceCounters string = ` SET DEADLOCK_PRIORITY -10; IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ @@ -292,8 +304,6 @@ END DECLARE @SqlStatement AS nvarchar(max) ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) - ,@Columns AS nvarchar(MAX) = '' - ,@PivotColumns AS nvarchar(MAX) = '' DECLARE @PCounters TABLE ( @@ -305,7 +315,7 @@ DECLARE @PCounters TABLE PRIMARY KEY([object_name], [counter_name], [instance_name]) ); -SET @SqlStatement = N' +WITH PerfCounters AS ( SELECT DISTINCT RTRIM(spi.[object_name]) [object_name] ,RTRIM(spi.[counter_name]) [counter_name] @@ -315,138 +325,112 @@ SELECT DISTINCT FROM sys.dm_os_performance_counters AS spi WHERE counter_name IN ( - ''SQL Compilations/sec'' - ,''SQL Re-Compilations/sec'' - ,''User Connections'' - ,''Batch Requests/sec'' - ,''Logouts/sec'' - ,''Logins/sec'' - ,''Processes blocked'' - ,''Latch Waits/sec'' - ,''Full Scans/sec'' - ,''Index Searches/sec'' - ,''Page Splits/sec'' - ,''Page lookups/sec'' - ,''Page reads/sec'' - ,''Page writes/sec'' - ,''Readahead pages/sec'' - ,''Lazy writes/sec'' - ,''Checkpoint pages/sec'' - ,''Page life expectancy'' - ,''Log File(s) Size (KB)'' - ,''Log File(s) Used Size (KB)'' - ,''Data File(s) Size (KB)'' - ,''Transactions/sec'' - ,''Write Transactions/sec'' - ,''Active Temp Tables'' - ,''Temp Tables Creation Rate'' - ,''Temp Tables For Destruction'' - ,''Free Space in tempdb (KB)'' - ,''Version Store Size (KB)'' - ,''Memory Grants Pending'' - ,''Memory Grants Outstanding'' - ,''Free list stalls/sec'' - ,''Buffer 
cache hit ratio'' - ,''Buffer cache hit ratio base'' - ,''Backup/Restore Throughput/sec'' - ,''Total Server Memory (KB)'' - ,''Target Server Memory (KB)'' - ,''Log Flushes/sec'' - ,''Log Flush Wait Time'' - ,''Memory broker clerk size'' - ,''Log Bytes Flushed/sec'' - ,''Bytes Sent to Replica/sec'' - ,''Log Send Queue'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Replica/sec'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Transport/sec'' - ,''Bytes Received from Replica/sec'' - ,''Receives from Replica/sec'' - ,''Flow Control Time (ms/sec)'' - ,''Flow Control/sec'' - ,''Resent Messages/sec'' - ,''Redone Bytes/sec'' - ,''XTP Memory Used (KB)'' - ,''Transaction Delay'' - ,''Log Bytes Received/sec'' - ,''Log Apply Pending Queue'' - ,''Redone Bytes/sec'' - ,''Recovery Queue'' - ,''Log Apply Ready Queue'' - ,''CPU usage %'' - ,''CPU usage % base'' - ,''Queued requests'' - ,''Requests completed/sec'' - ,''Blocked tasks'' - ,''Active memory grant amount (KB)'' - ,''Disk Read Bytes/sec'' - ,''Disk Read IO Throttled/sec'' - ,''Disk Read IO/sec'' - ,''Disk Write Bytes/sec'' - ,''Disk Write IO Throttled/sec'' - ,''Disk Write IO/sec'' - ,''Used memory (KB)'' - ,''Forwarded Records/sec'' - ,''Background Writer pages/sec'' - ,''Percent Log Used'' - ,''Log Send Queue KB'' - ,''Redo Queue KB'' - ,''Mirrored Write Transactions/sec'' - ,''Group Commit Time'' - ,''Group Commits/Sec'' + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + 
,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' ) OR ( - spi.[object_name] LIKE ''%User Settable%'' - OR spi.[object_name] LIKE ''%SQL Errors%'' - OR spi.[object_name] LIKE ''%Batch Resp Statistics%'' + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' ) OR ( - spi.[instance_name] IN (''_Total'') + spi.[instance_name] IN ('_Total') AND spi.[counter_name] IN ( - ''Lock Timeouts/sec'' - ,''Lock Timeouts (timeout > 0)/sec'' - ,''Number of Deadlocks/sec'' - ,''Lock Waits/sec'' - ,''Latch Waits/sec'' + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' ) ) -' - -INSERT INTO @PCounters EXEC(@SqlStatement) - -IF @MajorMinorVersion >= 1300 BEGIN - SET @Columns += N' - ,rgwg.[total_cpu_usage_preemptive_ms] AS [Preemptive CPU Usage (time)]' - SET @PivotColumns += N',[Preemptive CPU Usage (time)]' -END - -SET @SqlStatement = N' -SELECT - ''SQLServer:Workload Group Stats'' AS [object] - ,[counter] - ,[instance] - ,CAST(vs.[value] AS bigint) AS [value] - ,1 -FROM -( - SELECT - rgwg.[name] AS [instance] - ,rgwg.[total_request_count] AS [Request Count] - ,rgwg.[total_queued_request_count] AS [Queued Request Count] - ,rgwg.[total_cpu_limit_violation_count] AS [CPU Limit Violation Count] - ,rgwg.[total_cpu_usage_ms] AS [CPU Usage (time)] - ,rgwg.[total_lock_wait_count] AS [Lock Wait Count] - ,rgwg.[total_lock_wait_time_ms] AS [Lock Wait Time] - ,rgwg.[total_reduced_memgrant_count] AS [Reduced Memory Grant Count]' - + @Columns + N' - FROM sys.dm_resource_governor_workload_groups AS rgwg - INNER JOIN sys.dm_resource_governor_resource_pools AS rgrp /*No fields from this table. remove?*/ - ON rgwg.[pool_id] = rgrp.[pool_id] -) AS rg -UNPIVOT ( - [value] FOR [counter] IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], [Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ' + @PivotColumns + N') -) AS vs' +) -INSERT INTO @PCounters EXEC(@SqlStatement) +INSERT INTO @PCounters SELECT * FROM PerfCounters; SELECT 'sqlserver_performance' AS [measurement] @@ -1081,6 +1065,7 @@ SELECT ,s.[program_name] ,s.[host_name] ,s.[nt_user_name] + ,s.[login_name] ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) WHEN 0 THEN ''0-Read Committed'' WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)'' @@ -1149,40 +1134,190 @@ IF @MajorMinorVersion >= 1050 BEGIN END ` -const sqlServerRingBufferCpu string = ` +const sqlServerRingBufferCPU string = ` IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.';
	RAISERROR (@ErrorMessage,11,1)
	RETURN
-END
+END;

-SELECT
-	'sqlserver_cpu' AS [measurement]
-	,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
-	,[SQLProcessUtilization] AS [sqlserver_process_cpu]
-	,[SystemIdle] AS [system_idle_cpu]
-	,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu]
-FROM (
-	SELECT TOP 1
-		[record_id]
-		,[SQLProcessUtilization]
-		,[SystemIdle]
+WITH utilization_cte AS
+(
+	SELECT
+		[SQLProcessUtilization] AS [sqlserver_process_cpu]
+		,[SystemIdle] AS [system_idle_cpu]
+		,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu]
 	FROM (
-		SELECT
-			record.value('(./Record/@id)[1]', 'int') AS [record_id]
-			,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle]
-			,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization]
-			,[TIMESTAMP]
+		SELECT TOP 1
+			[record_id]
+			,[SQLProcessUtilization]
+			,[SystemIdle]
 		FROM (
 			SELECT
-				[TIMESTAMP]
-				,convert(XML, [record]) AS [record]
-			FROM sys.dm_os_ring_buffers
-			WHERE
-				[ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR'
-				AND [record] LIKE '%%'
-		) AS x
-	) AS y
-	ORDER BY [record_id] DESC
-) AS z
+				record.value('(./Record/@id)[1]', 'int') AS [record_id]
+				,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle]
+				,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization]
+				,[TIMESTAMP]
+			FROM (
+				SELECT
+					[TIMESTAMP]
+					,convert(XML, [record]) AS [record]
+				FROM sys.dm_os_ring_buffers
+				WHERE
+					[ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR'
+					AND [record] LIKE '%%'
+			) AS x
+		) AS y
+		ORDER BY [record_id] DESC
+	) AS z
+),
+processor_Info_cte AS
+(
+	SELECT ([cpu_count] / [hyperthread_ratio]) as [number_of_physical_cpus]
+	FROM sys.dm_os_sys_info
+)
+SELECT
+	'sqlserver_cpu' AS [measurement]
+	,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+	,[sqlserver_process_cpu]
+	,[system_idle_cpu]
+	,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu]
+FROM (
+	SELECT
+		(CASE
+			WHEN u.[other_process_cpu] < 0 THEN u.[sqlserver_process_cpu] / p.[number_of_physical_cpus]
+			ELSE u.[sqlserver_process_cpu]
+		END) AS [sqlserver_process_cpu]
+		,u.[system_idle_cpu]
+	FROM utilization_cte AS u
+		CROSS APPLY processor_Info_cte AS p
+	) AS b
+`
+
+// Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup
+// Certain fields are only supported on SQL Server 2016 and newer versions, identified by the check MajorMinorVersion >= 1300
+const sqlServerAvailabilityReplicaStates string = `
+IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterprise,Express*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.';
+	RAISERROR (@ErrorMessage,11,1)
+	RETURN
+END
+
+DECLARE
+	@SqlStatement AS nvarchar(max)
+	,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int)
+	,@Columns AS nvarchar(MAX) = ''
+
+IF @MajorMinorVersion >= 1300 BEGIN
+	SET @Columns += N'
+		,ag.basic_features
+		,ag.is_distributed
+		,ar.seeding_mode
+		,ar.seeding_mode_desc'
+END
+
+SET @SqlStatement = N'
+IF SERVERPROPERTY(''IsHadrEnabled'') = 1 BEGIN
+	SELECT
+		''sqlserver_hadr_replica_states'' AS [measurement]
+		,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance]
+		,convert(nvarchar(36), hars.replica_id) as replica_id
+		,ar.replica_server_name
+		,convert(nvarchar(36), hars.group_id) as group_id
+		,ag.name AS group_name
+		,hags.synchronization_health_desc AS ag_synchronization_health_desc
+		,ar.replica_metadata_id
+		,ar.availability_mode
+		,ar.availability_mode_desc
+		,ar.failover_mode
+		,ar.failover_mode_desc
+		,ar.session_timeout
+		,ar.primary_role_allow_connections
+		,ar.primary_role_allow_connections_desc
+		,ar.secondary_role_allow_connections
+		,ar.secondary_role_allow_connections_desc
+		,hars.is_local
+		,hars.role
+		,hars.role_desc
+		,hars.operational_state
+		,hars.operational_state_desc
+		,hars.connected_state
+		,hars.connected_state_desc
+		,hars.recovery_health
+		,hars.recovery_health_desc
+		,hars.synchronization_health AS replica_synchronization_health
+		,hars.synchronization_health_desc AS replica_synchronization_health_desc
+		,hars.last_connect_error_number
+		,hars.last_connect_error_description
+		,hars.last_connect_error_timestamp'
+		+ @Columns + N'
+	FROM sys.dm_hadr_availability_replica_states AS hars
+	INNER JOIN sys.availability_replicas AS ar on hars.replica_id = ar.replica_id
+	INNER JOIN sys.availability_groups AS ag on ar.group_id = ag.group_id
+	INNER JOIN sys.dm_hadr_availability_group_states AS hags ON hags.group_id = ag.group_id
+END'
+
+EXEC sp_executesql @SqlStatement
+`
+
+// Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup
+// Certain fields are only supported on SQL Server 2016 and newer (MajorMinorVersion >= 1300) or SQL Server 2014 and newer (MajorMinorVersion >= 1200)
+const sqlServerDatabaseReplicaStates string = `
+IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterprise,Express*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + ,@Columns AS nvarchar(MAX) = '' + +IF @MajorMinorVersion >= 1200 BEGIN + SET @Columns += N' + ,is_primary_replica' +END + +IF @MajorMinorVersion >= 1300 BEGIN + SET @Columns += N' + ,secondary_lag_seconds' +END + +SET @SqlStatement = N' +IF SERVERPROPERTY(''IsHadrEnabled'') = 1 BEGIN + SELECT + ''sqlserver_hadr_dbreplica_states'' AS [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,database_id + ,db_name(database_id) as database_name + ,convert(nvarchar(36), drs.replica_id) as replica_id + ,ar.replica_server_name + ,convert(nvarchar(36), drs.group_database_id) as group_database_id + ,synchronization_state + ,synchronization_state_desc + ,is_commit_participant + ,synchronization_health + ,synchronization_health_desc + ,database_state + ,database_state_desc + ,is_suspended + ,suspend_reason + ,suspend_reason_desc + ,last_sent_time + ,last_received_time + ,last_hardened_time + ,last_redone_time + ,log_send_queue_size + ,log_send_rate + ,redo_queue_size + ,redo_rate + ,filestream_send_rate + ,last_commit_time' + + @Columns + N' + FROM sys.dm_hadr_database_replica_states AS drs + INNER JOIN sys.availability_replicas AS ar on drs.replica_id = ar.replica_id +END' + +EXEC sp_executesql @SqlStatement ` diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 431076743101a..b1d6ea59d2f3b 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -9,11 +9,9 @@ import ( "sync" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" - googlepbduration "github.com/golang/protobuf/ptypes/duration" - googlepbts "github.com/golang/protobuf/ptypes/timestamp" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. 
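The import changes above move the plugin from the legacy `github.com/golang/protobuf/ptypes` duration and timestamp packages to the canonical `google.golang.org/protobuf/types/known` packages (and to the `v2` monitoring client); the message types are interchangeable on the wire, so only construction sites change in the hunks that follow. A minimal standalone sketch of the old-versus-new construction; the constructor calls shown are an equivalent alternative to the literal structs this diff uses:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	now := time.Now()

	// Literal construction, as used in the plugin below.
	ts := &timestamppb.Timestamp{Seconds: now.Unix()}
	dur := &durationpb.Duration{Seconds: 60}

	// Equivalent constructors provided by the new packages.
	ts2 := timestamppb.New(now)
	dur2 := durationpb.New(60 * time.Second)

	fmt.Println(ts.Seconds == ts2.Seconds, dur.Seconds == dur2.Seconds) // true true
}
```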
@@ -22,6 +20,8 @@ import ( distributionpb "google.golang.org/genproto/googleapis/api/distribution" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -108,9 +108,9 @@ const ( ) var ( - defaultCacheTTL = internal.Duration{Duration: 1 * time.Hour} - defaultWindow = internal.Duration{Duration: 1 * time.Minute} - defaultDelay = internal.Duration{Duration: 5 * time.Minute} + defaultCacheTTL = config.Duration(1 * time.Hour) + defaultWindow = config.Duration(1 * time.Minute) + defaultDelay = config.Duration(5 * time.Minute) ) type ( @@ -118,9 +118,9 @@ type ( Stackdriver struct { Project string `toml:"project"` RateLimit int `toml:"rate_limit"` - Window internal.Duration `toml:"window"` - Delay internal.Duration `toml:"delay"` - CacheTTL internal.Duration `toml:"cache_ttl"` + Window config.Duration `toml:"window"` + Delay config.Duration `toml:"delay"` + CacheTTL config.Duration `toml:"cache_ttl"` MetricTypePrefixInclude []string `toml:"metric_type_prefix_include"` MetricTypePrefixExclude []string `toml:"metric_type_prefix_exclude"` GatherRawDistributionBuckets bool `toml:"gather_raw_distribution_buckets"` @@ -201,24 +201,24 @@ func (g *lockedSeriesGrouper) Add( } // ListMetricDescriptors implements metricClient interface -func (c *stackdriverMetricClient) ListMetricDescriptors( +func (smc *stackdriverMetricClient) ListMetricDescriptors( ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, ) (<-chan *metricpb.MetricDescriptor, error) { mdChan := make(chan *metricpb.MetricDescriptor, 1000) go func() { - c.log.Debugf("List metric descriptor request filter: %s", req.Filter) + smc.log.Debugf("List metric descriptor request filter: %s", req.Filter) defer close(mdChan) // Iterate over metric descriptors and send them to buffered channel - mdResp := c.conn.ListMetricDescriptors(ctx, req) - c.listMetricDescriptorsCalls.Incr(1) + mdResp := smc.conn.ListMetricDescriptors(ctx, req) + smc.listMetricDescriptorsCalls.Incr(1) for { mdDesc, mdErr := mdResp.Next() if mdErr != nil { if mdErr != iterator.Done { - c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) + smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) } break } @@ -230,24 +230,24 @@ func (c *stackdriverMetricClient) ListMetricDescriptors( } // ListTimeSeries implements metricClient interface -func (c *stackdriverMetricClient) ListTimeSeries( +func (smc *stackdriverMetricClient) ListTimeSeries( ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, ) (<-chan *monitoringpb.TimeSeries, error) { tsChan := make(chan *monitoringpb.TimeSeries, 1000) go func() { - c.log.Debugf("List time series request filter: %s", req.Filter) + smc.log.Debugf("List time series request filter: %s", req.Filter) defer close(tsChan) // Iterate over timeseries and send them to buffered channel - tsResp := c.conn.ListTimeSeries(ctx, req) - c.listTimeSeriesCalls.Incr(1) + tsResp := smc.conn.ListTimeSeries(ctx, req) + smc.listTimeSeriesCalls.Incr(1) for { tsDesc, tsErr := tsResp.Next() if tsErr != nil { if tsErr != iterator.Done { - c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) + smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) } break } @@ -259,8 +259,8 @@ func (c *stackdriverMetricClient) ListTimeSeries( } // 
Close implements metricClient interface -func (s *stackdriverMetricClient) Close() error { - return s.conn.Close() +func (smc *stackdriverMetricClient) Close() error { + return smc.conn.Close() } // Description implements telegraf.Input interface @@ -322,14 +322,14 @@ func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { // Returns the start and end time for the next collection. func (s *Stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) { var start time.Time - if s.Window.Duration != 0 { - start = time.Now().Add(-s.Delay.Duration).Add(-s.Window.Duration) + if time.Duration(s.Window) != 0 { + start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(s.Window)) } else if prevEnd.IsZero() { - start = time.Now().Add(-s.Delay.Duration).Add(-defaultWindow.Duration) + start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(defaultWindow)) } else { start = prevEnd } - end := time.Now().Add(-s.Delay.Duration) + end := time.Now().Add(-time.Duration(s.Delay)) return start, end } @@ -393,11 +393,11 @@ func (s *Stackdriver) newTimeSeriesConf( ) *timeSeriesConf { filter := s.newListTimeSeriesFilter(metricType) interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), Filter: filter, Interval: interval, } @@ -432,7 +432,7 @@ func (t *timeSeriesConf) initForAggregate(alignerStr string) { } aligner := monitoringpb.Aggregation_Aligner(alignerInt) agg := &monitoringpb.Aggregation{ - AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + AlignmentPeriod: &durationpb.Duration{Seconds: 60}, PerSeriesAligner: aligner, } t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) @@ -522,8 +522,8 @@ func (s *Stackdriver) generatetimeSeriesConfs( if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { // Update interval for timeseries requests in timeseries cache interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { timeSeriesConf.listTimeSeriesRequest.Interval = interval @@ -533,7 +533,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( ret := []*timeSeriesConf{} req := &monitoringpb.ListMetricDescriptorsRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), } filters := s.newListMetricDescriptorsFilters() @@ -579,7 +579,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( s.timeSeriesConfCache = &timeSeriesConfCache{ TimeSeriesConfs: ret, Generated: time.Now(), - TTL: s.CacheTTL.Duration, + TTL: time.Duration(s.CacheTTL), } return ret, nil @@ -613,7 +613,9 @@ func (s *Stackdriver) gatherTimeSeries( if tsDesc.ValueType == metricpb.MetricDescriptor_DISTRIBUTION { dist := p.Value.GetDistributionValue() - s.addDistribution(dist, tags, ts, grouper, tsConf) + if err := s.addDistribution(dist, tags, ts, grouper, tsConf); err != nil { + return err + } } else { var value interface{} @@ -630,7 +632,9 @@ func (s *Stackdriver) gatherTimeSeries( value = 
p.Value.GetStringValue() } - grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value) + if err := grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value); err != nil { + return err + } } } } @@ -642,17 +646,27 @@ func (s *Stackdriver) gatherTimeSeries( func (s *Stackdriver) addDistribution( metric *distributionpb.Distribution, tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, -) { +) error { field := tsConf.fieldKey name := tsConf.measurement - grouper.Add(name, tags, ts, field+"_count", metric.Count) - grouper.Add(name, tags, ts, field+"_mean", metric.Mean) - grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation) + if err := grouper.Add(name, tags, ts, field+"_count", metric.Count); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_mean", metric.Mean); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation); err != nil { + return err + } if metric.Range != nil { - grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min) - grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max) + if err := grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max); err != nil { + return err + } } linearBuckets := metric.BucketOptions.GetLinearBuckets() @@ -693,8 +707,12 @@ func (s *Stackdriver) addDistribution( if i < int32(len(metric.BucketCounts)) { count += metric.BucketCounts[i] } - grouper.Add(name, tags, ts, field+"_bucket", count) + if err := grouper.Add(name, tags, ts, field+"_bucket", count); err != nil { + return err + } } + + return nil } func init() { diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 8010ad4817924..ad6b15145031a 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" @@ -15,6 +14,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) type Call struct { @@ -105,7 +105,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -138,7 +138,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -171,7 +171,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -204,7 +204,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -249,7 +249,7 @@ func TestGather(t *testing.T) { Points: 
[]*monitoringpb.Point{ { Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -283,7 +283,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -378,7 +378,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -473,7 +473,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -556,7 +556,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -702,7 +702,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -717,7 +717,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -732,7 +732,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -803,7 +803,6 @@ func TestGatherAlign(t *testing.T) { } testutil.RequireMetricsEqual(t, tt.expected, actual) - }) } } @@ -1082,7 +1081,7 @@ func TestListMetricDescriptorFilter(t *testing.T) { ch <- createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -1126,8 +1125,8 @@ func TestListMetricDescriptorFilter(t *testing.T) { } } -func TestNewListTimeSeriesFilter(t *testing.T) { +func TestNewListTimeSeriesFilter(_ *testing.T) { } -func TestTimeSeriesConfCacheIsValid(t *testing.T) { +func TestTimeSeriesConfCacheIsValid(_ *testing.T) { } diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 57953eed72600..ca60dbe3a2a79 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -50,6 +50,10 @@ ## http://docs.datadoghq.com/guides/dogstatsd/ datadog_extensions = false + ## Parses distributions metric as specified in the datadog statsd format + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + datadog_distributions = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ @@ -68,6 +72,9 @@ ## Maximum socket buffer size in bytes, once the buffer fills up, metrics ## will start dropping. Defaults to the OS default. # read_buffer_size = 65535 + + ## Max duration (TTL) for each metric to stay cached/reported without being updated. + # max_ttl = "10h" ``` ### Description @@ -95,6 +102,10 @@ implementation. 
In short, the telegraf statsd listener will accept: - `load.time:320|ms` - `load.time.nanoseconds:1|h` - `load.time:200|ms|@0.1` <- sampled 1/10 of the time +- Distributions + - `load.time:320|d` + - `load.time.nanoseconds:1|d` + - `load.time:200|d|@0.1` <- sampled 1/10 of the time It is possible to omit repetitive names and merge individual stats into a single line by separating them with additional colons: @@ -169,6 +180,9 @@ metric type: that `P%` of all the values statsd saw for that stat during that time period are below x. The most common value that people use for `P` is the `90`, this is a great number to try to optimize. +- Distributions + - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. + - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. ### Plugin arguments @@ -176,7 +190,7 @@ metric type: - **max_tcp_connections** []int: Maximum number of concurrent TCP connections to allow. Used when protocol is set to tcp. - **tcp_keep_alive** boolean: Enable TCP keep alive probes -- **tcp_keep_alive_period** internal.Duration: Specifies the keep-alive period for an active network connection +- **tcp_keep_alive_period** duration: Specifies the keep-alive period for an active network connection - **service_address** string: Address to listen for statsd UDP packets on - **delete_gauges** boolean: Delete gauges on every collection interval - **delete_counters** boolean: Delete counters on every collection interval @@ -192,6 +206,8 @@ the accuracy of percentiles but also increases the memory usage and cpu time. measurements and tags. - **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) - **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) +- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format (https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition) +- **max_ttl** config.Duration: Max duration (TTL) for each metric to stay cached/reported without being updated. 
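To make the `|d` wire format described above concrete, here is a small standalone Go sketch (illustrative, not part of the plugin) that splits a sampled distribution line essentially the way the listener's parser does:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "load.time:200|d|@0.1"

	// Bucket name vs. the rest: "load.time" / "200|d|@0.1"
	nameAndValue := strings.SplitN(line, ":", 2)

	// Value, metric type, and optional sample rate.
	pipesplit := strings.Split(nameAndValue[1], "|")

	fmt.Println("bucket:", nameAndValue[0]) // load.time
	fmt.Println("value:", pipesplit[0])     // 200
	fmt.Println("type:", pipesplit[1])      // d -> tagged metric_type=distribution
	fmt.Println("rate:", pipesplit[2])      // @0.1 -> sampled 1/10 of the time
}
```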
### Statsd bucket -> InfluxDB line-protocol Templates

diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go
index 377db66e6d3ad..77a01f5586a7b 100644
--- a/plugins/inputs/statsd/datadog.go
+++ b/plugins/inputs/statsd/datadog.go
@@ -38,29 +38,29 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
 	// tag is key:value
 	messageRaw := strings.SplitN(message, ":", 2)
 	if len(messageRaw) < 2 || len(messageRaw[0]) < 7 || len(messageRaw[1]) < 3 {
-		return fmt.Errorf("Invalid message format")
+		return fmt.Errorf("invalid message format")
 	}
 	header := messageRaw[0]
 	message = messageRaw[1]

 	rawLen := strings.SplitN(header[3:], ",", 2)
 	if len(rawLen) != 2 {
-		return fmt.Errorf("Invalid message format")
+		return fmt.Errorf("invalid message format")
 	}

 	titleLen, err := strconv.ParseInt(rawLen[0], 10, 64)
 	if err != nil {
-		return fmt.Errorf("Invalid message format, could not parse title.length: '%s'", rawLen[0])
+		return fmt.Errorf("invalid message format, could not parse title.length: '%s'", rawLen[0])
 	}
 	if len(rawLen[1]) < 1 {
-		return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0])
+		return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[1])
 	}
 	textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64)
 	if err != nil {
-		return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0])
+		return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[1])
 	}
 	if titleLen+textLen+1 > int64(len(message)) {
-		return fmt.Errorf("Invalid message format, title.length and text.length exceed total message length")
+		return fmt.Errorf("invalid message format, title.length and text.length exceed total message length")
 	}

 	rawTitle := message[:titleLen]
@@ -68,14 +68,14 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
 	message = message[titleLen+1+textLen:]

 	if len(rawTitle) == 0 || len(rawText) == 0 {
-		return fmt.Errorf("Invalid event message format: empty 'title' or 'text' field")
+		return fmt.Errorf("invalid event message format: empty 'title' or 'text' field")
 	}

 	name := rawTitle
 	tags := make(map[string]string, strings.Count(message, ",")+2) // allocate for the approximate number of tags
 	fields := make(map[string]interface{}, 9)
 	fields["alert_type"] = eventInfo // default event type
-	fields["text"] = uncommenter.Replace(string(rawText))
+	fields["text"] = uncommenter.Replace(rawText)
 	if defaultHostname != "" {
 		tags["source"] = defaultHostname
 	}
diff --git a/plugins/inputs/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go
index a52209c5665cb..2cf987a69bbf1 100644
--- a/plugins/inputs/statsd/running_stats_test.go
+++ b/plugins/inputs/statsd/running_stats_test.go
@@ -162,8 +162,5 @@ func TestRunningStats_PercentileLimit(t *testing.T) {
 }

 func fuzzyEqual(a, b, epsilon float64) bool {
-	if math.Abs(a-b) > epsilon {
-		return false
-	}
-	return true
+	return math.Abs(a-b) <= epsilon
 }
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 9c5780d00a596..fbbfef251adf9 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -3,7 +3,6 @@ package statsd
 import (
 	"bufio"
 	"bytes"
-	"errors"
 	"fmt"
 	"net"
 	"sort"
@@ -13,16 +12,18 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" + "github.com/pkg/errors" ) const ( - // UDP_MAX_PACKET_SIZE is the UDP packet limit, see + // UDPMaxPacketSize is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure - UDP_MAX_PACKET_SIZE int = 64 * 1024 + UDPMaxPacketSize int = 64 * 1024 defaultFieldName = "value" @@ -30,11 +31,25 @@ const ( defaultSeparator = "_" defaultAllowPendingMessage = 10000 - MaxTCPConnections = 250 parserGoRoutines = 5 ) +var errParsing = errors.New("error parsing statsd line") + +// Number will get parsed as an int or float depending on what is passed +type Number float64 + +func (n *Number) UnmarshalTOML(b []byte) error { + value, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + + *n = Number(value) + return nil +} + // Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp @@ -49,7 +64,7 @@ type Statsd struct { // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. - Percentiles []internal.Number + Percentiles []Number PercentileLimit int DeleteGauges bool @@ -69,6 +84,11 @@ type Statsd struct { // http://docs.datadoghq.com/guides/dogstatsd/ DataDogExtensions bool `toml:"datadog_extensions"` + // Parses distribution metrics in the datadog statsd format. + // Requires the DataDogExtension flag to be enabled. + // https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + DataDogDistributions bool `toml:"datadog_distributions"` + // UDPPacketSize is deprecated, it's only here for legacy support // we now always create 1 max size buffer and then copy only what we need // into the in channel @@ -87,8 +107,6 @@ type Statsd struct { accept chan bool // drops tracks the number of dropped metrics. drops int - // malformed tracks the number of malformed packets - malformed int // Channel for all incoming statsd packets in chan input @@ -97,10 +115,12 @@ type Statsd struct { // Cache gauges, counters & sets so they can be aggregated as they arrive // gauges and counters map measurement/tags hash -> field name -> metrics // sets and timings map measurement/tags hash -> metrics - gauges map[string]cachedgauge - counters map[string]cachedcounter - sets map[string]cachedset - timings map[string]cachedtimings + // distributions aggregate measurement/tags and are published directly + gauges map[string]cachedgauge + counters map[string]cachedcounter + sets map[string]cachedset + timings map[string]cachedtimings + distributions []cacheddistributions // bucket -> influx templates Templates []string @@ -114,8 +134,11 @@ type Statsd struct { MaxTCPConnections int `toml:"max_tcp_connections"` - TCPKeepAlive bool `toml:"tcp_keep_alive"` - TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"` + TCPKeepAlive bool `toml:"tcp_keep_alive"` + TCPKeepAlivePeriod *config.Duration `toml:"tcp_keep_alive_period"` + + // Max duration for each metric to stay cached without being updated. 
+ MaxTTL config.Duration `toml:"max_ttl"` graphiteParser *graphite.GraphiteParser @@ -131,7 +154,7 @@ type Statsd struct { UDPBytesRecv selfstat.Stat ParseTimeNS selfstat.Stat - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` // A pool of byte slices to handle parsing bufPool sync.Pool @@ -159,30 +182,40 @@ type metric struct { } type cachedset struct { - name string - fields map[string]map[string]bool - tags map[string]string + name string + fields map[string]map[string]bool + tags map[string]string + expiresAt time.Time } type cachedgauge struct { - name string - fields map[string]interface{} - tags map[string]string + name string + fields map[string]interface{} + tags map[string]string + expiresAt time.Time } type cachedcounter struct { - name string - fields map[string]interface{} - tags map[string]string + name string + fields map[string]interface{} + tags map[string]string + expiresAt time.Time } type cachedtimings struct { - name string - fields map[string]RunningStats - tags map[string]string + name string + fields map[string]RunningStats + tags map[string]string + expiresAt time.Time +} + +type cacheddistributions struct { + name string + value float64 + tags map[string]string } -func (_ *Statsd) Description() string { +func (s *Statsd) Description() string { return "Statsd UDP/TCP Server" } @@ -229,6 +262,10 @@ const sampleConfig = ` ## Parses datadog extensions to the statsd format datadog_extensions = false + ## Parses distributions metric as specified in the datadog statsd format + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + datadog_distributions = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ @@ -243,9 +280,12 @@ const sampleConfig = ` ## calculation of percentiles. Raising this limit increases the accuracy ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 + + ## Max duration (TTL) for each metric to stay cached/reported without being updated. + #max_ttl = "1000h" ` -func (_ *Statsd) SampleConfig() string { +func (s *Statsd) SampleConfig() string { return sampleConfig } @@ -254,6 +294,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { defer s.Unlock() now := time.Now() + for _, m := range s.distributions { + fields := map[string]interface{}{ + defaultFieldName: m.value, + } + acc.AddFields(m.name, fields, m.tags, now) + } + s.distributions = make([]cacheddistributions, 0) + for _, m := range s.timings { // Defining a template to parse field names for timers allows us to split // out multiple fields per timer. 
In this case we prefix each stat with the @@ -271,8 +319,8 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { fields[prefix+"lower"] = stats.Lower() fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { - name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value) - fields[name] = stats.Percentile(percentile.Value) + name := fmt.Sprintf("%s%v_percentile", prefix, percentile) + fields[name] = stats.Percentile(float64(percentile)) } } @@ -306,6 +354,9 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { if s.DeleteSets { s.sets = make(map[string]cachedset) } + + s.expireCachedMetrics() + return nil } @@ -322,6 +373,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.distributions = make([]cacheddistributions, 0) s.Lock() defer s.Unlock() @@ -378,7 +430,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.udpListen(conn) + if err := s.udpListen(conn); err != nil { + ac.AddError(err) + } }() } else { address, err := net.ResolveTCPAddr("tcp", s.ServiceAddress) @@ -396,7 +450,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.tcpListen(listener) + if err := s.tcpListen(listener); err != nil { + ac.AddError(err) + } }() } @@ -405,7 +461,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.parser() + if err := s.parser(); err != nil { + ac.AddError(err) + } }() } s.Log.Infof("Started the statsd service on %q", s.ServiceAddress) @@ -431,7 +489,7 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { } if s.TCPKeepAlivePeriod != nil { - if err = conn.SetKeepAlivePeriod(s.TCPKeepAlivePeriod.Duration); err != nil { + if err = conn.SetKeepAlivePeriod(time.Duration(*s.TCPKeepAlivePeriod)); err != nil { return err } } @@ -456,10 +514,12 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { // udpListen starts listening for udp packets on the configured port. func (s *Statsd) udpListen(conn *net.UDPConn) error { if s.ReadBufferSize > 0 { - s.UDPlistener.SetReadBuffer(s.ReadBufferSize) + if err := s.UDPlistener.SetReadBuffer(s.ReadBufferSize); err != nil { + return err + } } - buf := make([]byte, UDP_MAX_PACKET_SIZE) + buf := make([]byte, UDPMaxPacketSize) for { select { case <-s.done: @@ -475,9 +535,14 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { } s.UDPPacketsRecv.Incr(1) s.UDPBytesRecv.Incr(int64(n)) - b := s.bufPool.Get().(*bytes.Buffer) + b, ok := s.bufPool.Get().(*bytes.Buffer) + if !ok { + return fmt.Errorf("bufPool is not a bytes buffer") + } b.Reset() - b.Write(buf[:n]) + if _, err := b.Write(buf[:n]); err != nil { + return err + } select { case s.in <- input{ Buffer: b, @@ -513,9 +578,17 @@ func (s *Statsd) parser() error { switch { case line == "": case s.DataDogExtensions && strings.HasPrefix(line, "_e"): - s.parseEventMessage(in.Time, line, in.Addr) + if err := s.parseEventMessage(in.Time, line, in.Addr); err != nil { + return err + } default: - s.parseStatsdLine(line) + if err := s.parseStatsdLine(line); err != nil { + if errors.Cause(err) == errParsing { + // parsing errors log when the error occurs + continue + } + return err + } } } elapsed := time.Since(start) @@ -527,9 +600,6 @@ func (s *Statsd) parser() error { // parseStatsdLine will parse the given statsd line, validating it as it goes. 
// If the line is valid, it will be cached for the next call to Gather() func (s *Statsd) parseStatsdLine(line string) error { - s.Lock() - defer s.Unlock() - lineTags := make(map[string]string) if s.DataDogExtensions { recombinedSegments := make([]string, 0) @@ -554,7 +624,7 @@ func (s *Statsd) parseStatsdLine(line string) error { bits := strings.Split(line, ":") if len(bits) < 2 { s.Log.Errorf("Splitting ':', unable to parse metric: %s", line) - return errors.New("error Parsing statsd line") + return errParsing } // Extract bucket name from individual metric bits @@ -570,7 +640,7 @@ func (s *Statsd) parseStatsdLine(line string) error { pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { s.Log.Errorf("Splitting '|', unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } else if len(pipesplit) > 2 { sr := pipesplit[2] @@ -590,28 +660,28 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate metric type switch pipesplit[1] { - case "g", "c", "s", "ms", "h": + case "g", "c", "s", "ms", "h", "d": m.mtype = pipesplit[1] default: s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) - return errors.New("error parsing statsd line") + return errParsing } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } m.additive = true } switch m.mtype { - case "g", "ms", "h": + case "g", "ms", "h", "d": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } m.floatvalue = v case "c": @@ -621,7 +691,7 @@ func (s *Statsd) parseStatsdLine(line string) error { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != nil { s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } v = int64(v2) } @@ -647,6 +717,8 @@ func (s *Statsd) parseStatsdLine(line string) error { m.tags["metric_type"] = "timing" case "h": m.tags["metric_type"] = "histogram" + case "d": + m.tags["metric_type"] = "distribution" } if len(lineTags) > 0 { for k, v := range lineTags { @@ -674,6 +746,8 @@ func (s *Statsd) parseStatsdLine(line string) error { // map of tags. // Return values are (, , ) func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { + s.Lock() + defer s.Unlock() tags := make(map[string]string) bucketparts := strings.Split(bucket, ",") @@ -734,7 +808,19 @@ func parseKeyValue(keyvalue string) (string, string) { // aggregates and caches the current value(s). It does not deal with the // Delete* options, because those are dealt with in the Gather function. 
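+// Locking moved here (and into parseName) from parseStatsdLine, so line parsing itself now runs outside the lock.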
func (s *Statsd) aggregate(m metric) { + s.Lock() + defer s.Unlock() + switch m.mtype { + case "d": + if s.DataDogExtensions && s.DataDogDistributions { + cached := cacheddistributions{ + name: m.name, + value: m.floatvalue, + tags: m.tags, + } + s.distributions = append(s.distributions, cached) + } case "ms", "h": // Check if the measurement exists cached, ok := s.timings[m.hash] @@ -761,61 +847,67 @@ func (s *Statsd) aggregate(m metric) { field.AddValue(m.floatvalue) } cached.fields[m.field] = field + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.timings[m.hash] = cached case "c": // check if the measurement exists - _, ok := s.counters[m.hash] + cached, ok := s.counters[m.hash] if !ok { - s.counters[m.hash] = cachedcounter{ + cached = cachedcounter{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists - _, ok = s.counters[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.counters[m.hash].fields[m.field] = int64(0) + cached.fields[m.field] = int64(0) } - s.counters[m.hash].fields[m.field] = - s.counters[m.hash].fields[m.field].(int64) + m.intvalue + cached.fields[m.field] = cached.fields[m.field].(int64) + m.intvalue + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.counters[m.hash] = cached case "g": // check if the measurement exists - _, ok := s.gauges[m.hash] + cached, ok := s.gauges[m.hash] if !ok { - s.gauges[m.hash] = cachedgauge{ + cached = cachedgauge{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists - _, ok = s.gauges[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.gauges[m.hash].fields[m.field] = float64(0) + cached.fields[m.field] = float64(0) } if m.additive { - s.gauges[m.hash].fields[m.field] = - s.gauges[m.hash].fields[m.field].(float64) + m.floatvalue + cached.fields[m.field] = cached.fields[m.field].(float64) + m.floatvalue } else { - s.gauges[m.hash].fields[m.field] = m.floatvalue + cached.fields[m.field] = m.floatvalue } + + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.gauges[m.hash] = cached case "s": // check if the measurement exists - _, ok := s.sets[m.hash] + cached, ok := s.sets[m.hash] if !ok { - s.sets[m.hash] = cachedset{ + cached = cachedset{ name: m.name, fields: make(map[string]map[string]bool), tags: m.tags, } } // check if the field exists - _, ok = s.sets[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.sets[m.hash].fields[m.field] = make(map[string]bool) + cached.fields[m.field] = make(map[string]bool) } - s.sets[m.hash].fields[m.field][m.strvalue] = true + cached.fields[m.field][m.strvalue] = true + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.sets[m.hash] = cached } } @@ -826,7 +918,11 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // connection cleanup function defer func() { s.wg.Done() + + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() + // Add one connection potential back to channel when this one closes s.accept <- true s.forget(id) @@ -857,7 +953,10 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { b := s.bufPool.Get().(*bytes.Buffer) b.Reset() + // Writes to a bytes buffer always succeed, so do not check the errors here + //nolint:errcheck,revive b.Write(scanner.Bytes()) + //nolint:errcheck,revive b.WriteByte('\n') select { @@ -876,6 +975,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // refuser refuses a TCP connection 
func (s *Statsd) refuser(conn *net.TCPConn) { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") @@ -900,8 +1001,12 @@ func (s *Statsd) Stop() { s.Log.Infof("Stopping the statsd service") close(s.done) if s.isUDP() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.UDPlistener.Close() } else { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.TCPlistener.Close() // Close all open TCP connections // - get all conns from the s.conns map and put into slice @@ -914,6 +1019,8 @@ func (s *Statsd) Stop() { } s.cleanup.Unlock() for _, conn := range conns { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() } } @@ -932,6 +1039,39 @@ func (s *Statsd) isUDP() bool { return strings.HasPrefix(s.Protocol, "udp") } +func (s *Statsd) expireCachedMetrics() { + // If Max TTL wasn't configured, skip expiration. + if s.MaxTTL == 0 { + return + } + + now := time.Now() + + for key, cached := range s.gauges { + if now.After(cached.expiresAt) { + delete(s.gauges, key) + } + } + + for key, cached := range s.sets { + if now.After(cached.expiresAt) { + delete(s.sets, key) + } + } + + for key, cached := range s.timings { + if now.After(cached.expiresAt) { + delete(s.timings, key) + } + } + + for key, cached := range s.counters { + if now.After(cached.expiresAt) { + delete(s.counters, key) + } + } +} + func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index f76681134a094..a236d638ba330 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -2,15 +2,16 @@ package statsd import ( "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "net" "sync" "testing" "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) @@ -29,6 +30,7 @@ func NewTestStatsd() *Statsd { s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.distributions = make([]cacheddistributions, 0) s.MetricSeparator = "_" @@ -58,7 +60,7 @@ func TestConcurrentConns(t *testing.T) { // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8125") + _, err = net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) _, err = conn.Write([]byte(testMsg)) assert.NoError(t, err) @@ -87,7 +89,7 @@ func TestConcurrentConns1(t *testing.T) { // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8125") + _, err = net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) _, err = conn.Write([]byte(testMsg)) assert.NoError(t, err) @@ -129,16 +131,11 @@ func BenchmarkUDP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) time.Sleep(time.Millisecond * 
250) conn, err := net.Dial("udp", "127.0.0.1:8125") - if err != nil { - panic(err) - } + require.NoError(b, err) var wg sync.WaitGroup for i := 1; i <= producerThreads; i++ { @@ -149,7 +146,6 @@ func BenchmarkUDP(b *testing.B) { // wait for 250,000 metrics to get added to accumulator for len(listener.in) > 0 { - fmt.Printf("Left in buffer: %v \n", len(listener.in)) time.Sleep(time.Millisecond) } listener.Stop() @@ -159,6 +155,7 @@ func BenchmarkUDP(b *testing.B) { func sendRequests(conn net.Conn, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < 25000; i++ { + //nolint:errcheck,revive fmt.Fprintf(conn, testMsg) } } @@ -176,16 +173,12 @@ func BenchmarkTCP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) time.Sleep(time.Millisecond * 250) conn, err := net.Dial("tcp", "127.0.0.1:8125") - if err != nil { - panic(err) - } + require.NoError(b, err) + var wg sync.WaitGroup for i := 1; i <= producerThreads; i++ { wg.Add(1) @@ -212,10 +205,7 @@ func TestParse_ValidLines(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } } @@ -243,10 +233,7 @@ func TestParse_Gauges(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -288,10 +275,7 @@ func TestParse_Gauges(t *testing.T) { } for _, test := range validations { - err := testValidateGauge(test.name, test.value, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge(test.name, test.value, s.gauges)) } } @@ -321,10 +305,7 @@ func TestParse_Sets(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -350,10 +331,7 @@ func TestParse_Sets(t *testing.T) { } for _, test := range validations { - err := testValidateSet(test.name, test.value, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet(test.name, test.value, s.sets)) } } @@ -378,10 +356,7 @@ func TestParse_Counters(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -415,20 +390,17 @@ func TestParse_Counters(t *testing.T) { } for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { s := NewTestStatsd() - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} - // 
Test that counters work + // Test that timings work validLines := []string{ "test.timing:1|ms", "test.timing:11|ms", @@ -438,13 +410,10 @@ func TestParse_Timings(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) valid := map[string]interface{}{ "90_percentile": float64(11), @@ -459,6 +428,60 @@ func TestParse_Timings(t *testing.T) { acc.AssertContainsFields(t, "test_timing", valid) } +// Tests low-level functionality of distributions +func TestParse_Distributions(t *testing.T) { + s := NewTestStatsd() + acc := &testutil.Accumulator{} + + parseMetrics := func() { + // Test that distributions work + validLines := []string{ + "test.distribution:1|d", + "test.distribution2:2|d", + "test.distribution3:3|d", + "test.distribution4:1|d", + "test.distribution5:1|d", + } + + for _, line := range validLines { + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) + } + + require.NoError(t, s.Gather(acc)) + } + + validMeasurementMap := map[string]float64{ + "test_distribution": 1, + "test_distribution2": 2, + "test_distribution3": 3, + "test_distribution4": 1, + "test_distribution5": 1, + } + + // Test parsing when DataDogExtensions and DataDogDistributions aren't enabled + parseMetrics() + for key := range validMeasurementMap { + acc.AssertDoesNotContainMeasurement(t, key) + } + + // Test parsing when DataDogDistributions is enabled but not DataDogExtensions + s.DataDogDistributions = true + parseMetrics() + for key := range validMeasurementMap { + acc.AssertDoesNotContainMeasurement(t, key) + } + + // Test parsing when DataDogExtensions and DataDogDistributions are enabled + s.DataDogExtensions = true + parseMetrics() + for key, value := range validMeasurementMap { + field := map[string]interface{}{ + "value": float64(value), + } + acc.AssertContainsFields(t, key, field) + } +} + func TestParseScientificNotation(t *testing.T) { s := NewTestStatsd() sciNotationLines := []string{ @@ -468,10 +491,7 @@ func TestParseScientificNotation(t *testing.T) { "scientific.notation:4.6968460083008E-5|h", } for _, line := range sciNotationLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line [%s] should not have resulted in error: %s\n", line, err) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line [%s] should not have resulted in error", line) } } @@ -490,10 +510,7 @@ func TestParse_InvalidLines(t *testing.T) { "invalid.value:1d1|c", } for _, line := range invalidLines { - err := s.parseStatsdLine(line) - if err == nil { - t.Errorf("Parsing line %s should have resulted in an error\n", line) - } + require.Errorf(t, s.parseStatsdLine(line), "Parsing line %s should have resulted in an error", line) } } @@ -508,10 +525,7 @@ func TestParse_InvalidSampleRate(t *testing.T) { } for _, line := range invalidLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } counterValidations := []struct { @@ -532,21 +546,12 @@ func TestParse_InvalidSampleRate(t *testing.T) { } for _, test := range counterValidations { - err := testValidateCounter(test.name, 
test.value, test.cache) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, test.cache)) } - err := testValidateGauge("invalid_sample_rate", 45, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("invalid_sample_rate", 45, s.gauges)) - err = testValidateSet("invalid_sample_rate", 1, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("invalid_sample_rate", 1, s.sets)) } // Names should be parsed like . -> _ @@ -558,10 +563,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -579,10 +581,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -599,10 +598,7 @@ func TestParse_Template(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -621,10 +617,7 @@ func TestParse_Template(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -641,10 +634,7 @@ func TestParse_TemplateFilter(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -663,10 +653,7 @@ func TestParse_TemplateFilter(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -683,10 +670,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -701,10 +685,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -731,10 +712,7 @@ func TestParse_TemplateFields(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should 
not have resulted in an error", line) } counterTests := []struct { @@ -760,10 +738,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate counters for _, test := range counterTests { - err := testValidateCounter(test.name, test.value, s.counters, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters, test.field)) } gaugeTests := []struct { @@ -784,10 +759,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate gauges for _, test := range gaugeTests { - err := testValidateGauge(test.name, test.value, s.gauges, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge(test.name, test.value, s.gauges, test.field)) } setTests := []struct { @@ -808,10 +780,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate sets for _, test := range setTests { - err := testValidateSet(test.name, test.value, s.sets, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet(test.name, test.value, s.sets, test.field)) } } @@ -859,18 +828,12 @@ func TestParse_Tags(t *testing.T) { for _, test := range tests { name, _, tags := s.parseName(test.bucket) - if name != test.name { - t.Errorf("Expected: %s, got %s", test.name, name) - } + require.Equalf(t, name, test.name, "Expected: %s, got %s", test.name, name) for k, v := range test.tags { actual, ok := tags[k] - if !ok { - t.Errorf("Expected key: %s not found", k) - } - if actual != v { - t.Errorf("Expected %s, got %s", v, actual) - } + require.Truef(t, ok, "Expected key: %s not found", k) + require.Equalf(t, actual, v, "Expected %s, got %s", v, actual) } } } @@ -985,10 +948,8 @@ func TestParse_DataDogTags(t *testing.T) { s := NewTestStatsd() s.DataDogExtensions = true - err := s.parseStatsdLine(tt.line) - require.NoError(t, err) - err = s.Gather(&acc) - require.NoError(t, err) + require.NoError(t, s.parseStatsdLine(tt.line)) + require.NoError(t, s.Gather(&acc)) testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) @@ -1020,9 +981,7 @@ func TestParseName(t *testing.T) { for _, test := range tests { name, _, _ := s.parseName(test.inName) - if name != test.outName { - t.Errorf("Expected: %s, got %s", test.outName, name) - } + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) } // Test with separator == "." @@ -1048,9 +1007,7 @@ func TestParseName(t *testing.T) { for _, test := range tests { name, _, _ := s.parseName(test.inName) - if name != test.outName { - t.Errorf("Expected: %s, got %s", test.outName, name) - } + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) } } @@ -1066,15 +1023,72 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - if len(s.counters) != 2 { - t.Errorf("Expected 2 separate measurements, found %d", len(s.counters)) - } + require.Lenf(t, s.counters, 2, "Expected 2 separate measurements, found %d", len(s.counters)) +} + +// Test that the metric caches expire (clear) an entry after the entry hasn't been updated for the configurable MaxTTL duration. 
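+//
+// A rough sketch of the expiry pattern exercised below (the names here are
+// illustrative, not the plugin's actual internals): each cached entry records
+// when it was last updated, and the next Gather drops entries older than MaxTTL:
+//
+//	for key, entry := range cache {
+//		if time.Since(entry.lastUpdate) > time.Duration(maxTTL) {
+//			delete(cache, key)
+//		}
+//	}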
+func TestCachesExpireAfterMaxTTL(t *testing.T) { + s := NewTestStatsd() + s.MaxTTL = config.Duration(100 * time.Microsecond) + + acc := &testutil.Accumulator{} + require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.Gather(acc)) + + // Max TTL goes by, our 'valid' entry is cleared. + time.Sleep(100 * time.Microsecond) + require.NoError(t, s.Gather(acc)) + + // Now when we gather, we should have a counter that is reset to zero. + require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.Gather(acc)) + + // Wait for the metrics to arrive + acc.Wait(3) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 90, + }, + time.Now(), + telegraf.Counter, + ), + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 90, + }, + time.Now(), + telegraf.Counter, + ), + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 45, + }, + time.Now(), + telegraf.Counter, + ), + }, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) } // Test that measurements with multiple bits, are treated as different outputs @@ -1119,92 +1133,52 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { sMultiple := NewTestStatsd() for _, line := range singleLines { - err := sSingle.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, sSingle.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } for _, line := range multipleLines { - err := sMultiple.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, sMultiple.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - if len(sSingle.timings) != 3 { - t.Errorf("Expected 3 measurement, found %d", len(sSingle.timings)) - } + require.Lenf(t, sSingle.timings, 3, "Expected 3 measurement, found %d", len(sSingle.timings)) - if cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"]; !ok { - t.Errorf("Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") - } else { - if cachedtiming.name != "valid_multiple" { - t.Errorf("Expected the name to be 'valid_multiple', got %s", cachedtiming.name) - } + cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"] + require.Truef(t, ok, "Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") + require.Equalf(t, cachedtiming.name, "valid_multiple", "Expected the name to be 'valid_multiple', got %s", cachedtiming.name) - // A 0 at samplerate 0.1 will add 10 values of 0, - // A 0 with invalid samplerate will add a single 0, - // plus the last bit of value 1 - // which adds up to 12 individual datapoints to be cached - if cachedtiming.fields[defaultFieldName].n != 12 { - t.Errorf("Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n) - } + // A 0 at samplerate 0.1 will add 10 values of 0, + // A 0 with invalid samplerate will add a single 0, + // plus the last bit of value 1 + // which adds up to 12 individual datapoints to be cached + require.EqualValuesf(t, cachedtiming.fields[defaultFieldName].n, 12, "Expected 12 additions, got %d", 
cachedtiming.fields[defaultFieldName].n) - if cachedtiming.fields[defaultFieldName].upper != 1 { - t.Errorf("Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) - } - } + require.EqualValuesf(t, cachedtiming.fields[defaultFieldName].upper, 1, "Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate - if err := testValidateSet("valid_multiple_duplicate", 2, sSingle.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sSingle.sets)) - if err := testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets)) - if err := testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters)) - if err := testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters)) - if err := testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges)) - if err := testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges)) // test if sSingle and sMultiple did compute the same stats for valid.multiple.mixed - if err := testValidateSet("valid_multiple_mixed", 1, sSingle.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_mixed", 1, sSingle.sets)) - if err := testValidateSet("valid_multiple_mixed", 1, sMultiple.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_mixed", 1, sMultiple.sets)) - if err := testValidateCounter("valid_multiple_mixed", 1, sSingle.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_mixed", 1, sSingle.counters)) - if err := testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters)) - if err := testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges)) - if err := testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges)) } // Tests low-level functionality of timings when multiple fields is enabled @@ -1212,7 +1186,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1229,12 +1203,9 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) 
- if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) valid := map[string]interface{}{ "success_90_percentile": float64(11), @@ -1263,7 +1234,7 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{} - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1280,12 +1251,9 @@ func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) expectedSuccess := map[string]interface{}{ "90_percentile": float64(11), @@ -1444,23 +1412,15 @@ func TestParse_Timings_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteTimings = true fakeacc := &testutil.Accumulator{} - var err error line := "timing:100|ms" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - if len(s.timings) != 1 { - t.Errorf("Should be 1 timing, found %d", len(s.timings)) - } + require.Lenf(t, s.timings, 1, "Should be 1 timing, found %d", len(s.timings)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - if len(s.timings) != 0 { - t.Errorf("All timings should have been deleted, found %d", len(s.timings)) - } + require.Lenf(t, s.timings, 0, "All timings should have been deleted, found %d", len(s.timings)) } // Tests the delete_gauges option @@ -1468,25 +1428,15 @@ func TestParse_Gauges_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteGauges = true fakeacc := &testutil.Accumulator{} - var err error line := "current.users:100|g" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - err = testValidateGauge("current_users", 100, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("current_users", 100, s.gauges)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateGauge("current_users", 100, s.gauges) - if err == nil { - t.Error("current_users_gauge metric should have been deleted") - } + require.Error(t, testValidateGauge("current_users", 100, s.gauges), "current_users_gauge metric should have been deleted") } // Tests the delete_sets option @@ -1494,25 +1444,15 @@ func TestParse_Sets_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteSets = true fakeacc := &testutil.Accumulator{} - var err error line := "unique.user.ids:100|s" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - err = testValidateSet("unique_user_ids", 1, s.sets) - if err != nil { - t.Error(err.Error()) 
- } + require.NoError(t, testValidateSet("unique_user_ids", 1, s.sets)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateSet("unique_user_ids", 1, s.sets) - if err == nil { - t.Error("unique_user_ids_set metric should have been deleted") - } + require.Error(t, testValidateSet("unique_user_ids", 1, s.sets), "unique_user_ids_set metric should have been deleted") } // Tests the delete_counters option @@ -1520,43 +1460,25 @@ func TestParse_Counters_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteCounters = true fakeacc := &testutil.Accumulator{} - var err error line := "total.users:100|c" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error\n", line) - err = testValidateCounter("total_users", 100, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("total_users", 100, s.counters)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateCounter("total_users", 100, s.counters) - if err == nil { - t.Error("total_users_counter metric should have been deleted") - } + require.Error(t, testValidateCounter("total_users", 100, s.counters), "total_users_counter metric should have been deleted") } func TestParseKeyValue(t *testing.T) { k, v := parseKeyValue("foo=bar") - if k != "foo" { - t.Errorf("Expected %s, got %s", "foo", k) - } - if v != "bar" { - t.Errorf("Expected %s, got %s", "bar", v) - } + require.Equalf(t, k, "foo", "Expected %s, got %s", "foo", k) + require.Equalf(t, v, "bar", "Expected %s, got %s", "bar", v) k2, v2 := parseKeyValue("baz") - if k2 != "" { - t.Errorf("Expected %s, got %s", "", k2) - } - if v2 != "baz" { - t.Errorf("Expected %s, got %s", "baz", v2) - } + require.Equalf(t, k2, "", "Expected %s, got %s", "", k2) + require.Equalf(t, v2, "baz", "Expected %s, got %s", "baz", v2) } // Test utility functions @@ -1670,12 +1592,10 @@ func TestTCP(t *testing.T) { conn, err := net.Dial("tcp", addr) _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) - err = conn.Close() - require.NoError(t, err) + require.NoError(t, conn.Close()) for { - err = statsd.Gather(&acc) - require.NoError(t, err) + require.NoError(t, statsd.Gather(&acc)) if len(acc.Metrics) > 0 { break @@ -1705,22 +1625,21 @@ func TestUdp(t *testing.T) { statsd := Statsd{ Log: testutil.Logger{}, Protocol: "udp", - ServiceAddress: "localhost:8125", + ServiceAddress: "localhost:14223", AllowedPendingMessages: 250000, } var acc testutil.Accumulator require.NoError(t, statsd.Start(&acc)) defer statsd.Stop() - conn, err := net.Dial("udp", "127.0.0.1:8125") - _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) + conn, err := net.Dial("udp", "127.0.0.1:14223") require.NoError(t, err) - err = conn.Close() + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) + require.NoError(t, conn.Close()) for { - err = statsd.Gather(&acc) - require.NoError(t, err) + require.NoError(t, statsd.Gather(&acc)) if len(acc.Metrics) > 0 { break @@ -1745,3 +1664,12 @@ func TestUdp(t *testing.T) { testutil.IgnoreTime(), ) } + +func TestParse_Ints(t *testing.T) { + s := NewTestStatsd() + s.Percentiles = []Number{90} + acc := &testutil.Accumulator{} + + require.NoError(t, s.Gather(acc)) + require.Equal(t, s.Percentiles, []Number{90.0}) +} diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md index 
18b26298e7af4..61f940a8df01d 100644 --- a/plugins/inputs/suricata/README.md +++ b/plugins/inputs/suricata/README.md @@ -4,6 +4,7 @@ This plugin reports internal performance counters of the Suricata IDS/IPS engine, such as captured traffic volume, memory usage, uptime, flow counters, and much more. It provides a socket for the Suricata log output to write JSON stats output to, and processes the incoming data to fit Telegraf's format. +It can also report triggered Suricata IDS/IPS alerts. ### Configuration @@ -17,6 +18,9 @@ stats output. # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # becomes "detect_alert" when delimiter is "_". delimiter = "_" + + # Detect alert logs + alerts = false ``` ### Metrics @@ -26,7 +30,7 @@ stats output. See http://suricata.readthedocs.io/en/latest/performance/statistics.html for more information. -All fields are numeric. +All fields for Suricata stats are numeric. - suricata - tags: - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics @@ -94,6 +98,19 @@ All fields are numeric. - tcp_synack - ... +Some fields of the Suricata alerts are strings, for example the signatures. See https://suricata.readthedocs.io/en/suricata-6.0.0/output/eve/eve-json-format.html?highlight=priority#event-type-alert for more information. + +- suricata_alert + - fields: + - action + - gid + - severity + - signature + - source_ip + - source_port + - target_ip + - target_port + - ... #### Suricata configuration diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 17c0b571510b0..5e1dc384478b7 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -25,6 +25,7 @@ const ( type Suricata struct { Source string `toml:"source"` Delimiter string `toml:"delimiter"` + Alerts bool `toml:"alerts"` inputListener *net.UnixListener cancel context.CancelFunc @@ -36,11 +37,11 @@ type Suricata struct { // Description returns the plugin description. func (s *Suricata) Description() string { - return "Suricata stats plugin" + return "Suricata stats and alerts plugin" } const sampleConfig = ` - ## Data sink for Suricata stats log + ## Data sink for Suricata stats and alerts logs # This is expected to be a filename of a # unix socket to be created for listening. source = "/var/run/suricata-stats.sock" @@ -48,6 +49,9 @@ const sampleConfig = ` # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # becomes "detect_alert" when delimiter is "_". delimiter = "_" + + ## Detect alert logs + # alerts = false ` // SampleConfig returns a sample TOML section to illustrate configuration @@ -81,6 +85,8 @@ func (s *Suricata) Start(acc telegraf.Accumulator) error { // Stop causes the plugin to cease collecting JSON data from the socket provided // to Suricata.
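//
// For reference, an illustrative suricata_alert output line for the README
// section above, assuming delimiter "_" and the sample alert from
// testdata/test3.json (the exact field set depends on the alert record):
//
//	suricata_alert action="allowed",category="Misc activity",gid=1,rev=0,severity=3,signature="Corrupted HTTP body",signature_id=6,source_ip="10.0.0.5",source_port=18715,target_ip="179.60.192.3",target_port=80
//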
func (s *Suricata) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.inputListener.Close() if s.cancel != nil { s.cancel() } @@ -98,8 +104,12 @@ func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn line, rerr := reader.ReadBytes('\n') if rerr != nil { return rerr - } else if len(line) > 0 { - s.parse(acc, line) + } + if len(line) > 0 { + err := s.parse(acc, line) + if err != nil { + acc.AddError(err) + } } } } @@ -146,29 +156,45 @@ func flexFlatten(outmap map[string]interface{}, field string, v interface{}, del return err } } + case []interface{}: + for _, v := range t { + err := flexFlatten(outmap, field, v, delimiter) + if err != nil { + return err + } + } + case string: + outmap[field] = v + case float64: - outmap[field] = v.(float64) + outmap[field] = t default: - return fmt.Errorf("Unsupported type %T encountered", t) + return fmt.Errorf("unsupported type %T encountered", t) } return nil } -func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { - // initial parsing - var result map[string]interface{} - err := json.Unmarshal([]byte(sjson), &result) - if err != nil { - acc.AddError(err) +func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interface{}) { + if _, ok := result["alert"].(map[string]interface{}); !ok { + s.Log.Debug("'alert' sub-object does not have required structure") return } - // check for presence of relevant stats - if _, ok := result["stats"]; !ok { - s.Log.Debug("Input does not contain necessary 'stats' sub-object") - return + totalmap := make(map[string]interface{}) + for k, v := range result["alert"].(map[string]interface{}) { + // the source and target fields are nested maps + err := flexFlatten(totalmap, k, v, s.Delimiter) + if err != nil { + s.Log.Debugf("Flattening alert failed: %v", err) + // we skip this subitem as something did not parse correctly + continue + } } + // there is no threads field in alert output; alerts are always global + acc.AddFields("suricata_alert", totalmap, nil) +} + +func (s *Suricata) parseStats(acc telegraf.Accumulator, result map[string]interface{}) { if _, ok := result["stats"].(map[string]interface{}); !ok { s.Log.Debug("The 'stats' sub-object does not have required structure") return @@ -182,9 +208,9 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { for k, t := range v { outmap := make(map[string]interface{}) if threadStruct, ok := t.(map[string]interface{}); ok { - err = flexFlatten(outmap, "", threadStruct, s.Delimiter) + err := flexFlatten(outmap, "", threadStruct, s.Delimiter) if err != nil { - s.Log.Debug(err) + s.Log.Debugf("Flattening stats failed: %v", err) // we skip this thread as something did not parse correctly continue } @@ -195,10 +221,11 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { s.Log.Debug("The 'threads' sub-object does not have required structure") } } else { - err = flexFlatten(totalmap, k, v, s.Delimiter) + err := flexFlatten(totalmap, k, v, s.Delimiter) if err != nil { - s.Log.Debug(err.Error()) + s.Log.Debugf("Flattening stats failed: %v", err) // we skip this subitem as something did not parse correctly + continue } } } @@ -213,9 +240,31 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { } } +func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) error { + // initial parsing + var result map[string]interface{} + err := json.Unmarshal(sjson, &result) + if err != nil { + return err + } + // check for presence of relevant stats or alert + _,
ok := result["stats"] + _, ok2 := result["alert"] + if !ok && !ok2 { + s.Log.Debugf("Invalid input without 'stats' or 'alert' object: %v", result) + return fmt.Errorf("input does not contain 'stats' or 'alert' object") + } + if ok { + s.parseStats(acc, result) + } else if ok2 && s.Alerts { + s.parseAlert(acc, result) + } + return nil +} + // Gather measures and submits one full set of telemetry to Telegraf. // Not used here, submission is completely input-driven. -func (s *Suricata) Gather(acc telegraf.Accumulator) error { +func (s *Suricata) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9c9c2ddc3694c..f3fc5f14eb394 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -2,7 +2,6 @@ package suricata import ( "fmt" - "io/ioutil" "log" "math/rand" "net" @@ -21,7 +20,7 @@ var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"," var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -29,6 +28,7 @@ func TestSuricataLarge(t *testing.T) { s := Suricata{ Source: tmpfn, Delimiter: ".", + Alerts: true, Log: testutil.Logger{ Name: "inputs.suricata", }, @@ -37,20 +37,85 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test1.json") + data, err := os.ReadFile("testdata/test1.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(data)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write(data) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + + //test suricata alerts + data2, err := os.ReadFile("testdata/test2.json") + require.NoError(t, err) + _, err = c.Write(data2) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) + + acc.Wait(1) +} + +func TestSuricataAlerts(t *testing.T) { + dir, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Alerts: true, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + data, err := os.ReadFile("testdata/test3.json") + require.NoError(t, err) + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + _, err = c.Write(data) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(1) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata_alert", + map[string]string{}, + map[string]interface{}{ + "action": "allowed", + "category": "Misc activity", + "gid": float64(1), + "rev": float64(0), + "signature": "Corrupted HTTP body", + "signature_id": float64(6), + "severity": float64(3), + "source.ip": "10.0.0.5", + "target.ip": "179.60.192.3", + "source.port": float64(18715), + "target.port": float64(80), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, 
expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestSuricata(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -68,9 +133,11 @@ func TestSuricata(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex2)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex2)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(1) @@ -94,7 +161,7 @@ func TestSuricata(t *testing.T) { } func TestThreadStats(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -113,13 +180,18 @@ func TestThreadStats(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte("")) - c.Write([]byte("\n")) - c.Write([]byte("foobard}\n")) - c.Write([]byte(ex3)) - c.Write([]byte("\n")) - c.Close() - acc.Wait(1) + _, err = c.Write([]byte("")) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + _, err = c.Write([]byte("foobard}\n")) + require.NoError(t, err) + _, err = c.Write([]byte(ex3)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) + acc.Wait(2) expected := []telegraf.Metric{ testutil.MustMetric( @@ -139,7 +211,7 @@ func TestThreadStats(t *testing.T) { } func TestSuricataInvalid(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -158,9 +230,11 @@ func TestSuricataInvalid(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte("sfjiowef")) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("sfjiowef")) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } @@ -179,7 +253,7 @@ func TestSuricataInvalidPath(t *testing.T) { } func TestSuricataTooLongLine(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -197,16 +271,17 @@ func TestSuricataTooLongLine(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(strings.Repeat("X", 20000000))) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(strings.Repeat("X", 20000000))) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) - } func TestSuricataEmptyJSON(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -224,16 +299,16 @@ func TestSuricataEmptyJSON(t *testing.T) { c, err := net.Dial("unix", tmpfn) if err != nil { log.Println(err) - } - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } func TestSuricataDisconnectSocket(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := 
os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -251,21 +326,25 @@ func TestSuricataDisconnectSocket(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex2)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex2)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) c, err = net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex3)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex3)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(2) } func TestSuricataStartStop(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -280,3 +359,41 @@ func TestSuricataStartStop(t *testing.T) { require.NoError(t, s.Start(&acc)) s.Stop() } + +func TestSuricataParse(t *testing.T) { + tests := []struct { + filename string + expected []telegraf.Metric + }{{ + filename: "test2.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "suricata", + map[string]string{ + "thread": "W#01-ens2f1", + }, + map[string]interface{}{ + "detect_alert": float64(0), + "detect_engines_id": float64(0), + "detect_engines_last_reload": "2021-06-08T06:33:05.084872+0000", + "detect_engines_rules_failed": float64(0), + "detect_engines_rules_loaded": float64(22712), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tc := range tests { + data, err := os.ReadFile("testdata/" + tc.filename) + require.NoError(t, err) + s := Suricata{ + Delimiter: "_", + } + acc := testutil.Accumulator{} + s.parse(&acc, data) + + testutil.RequireMetricsEqual(t, tc.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + } +} diff --git a/plugins/inputs/suricata/suricata_testutil.go b/plugins/inputs/suricata/suricata_testutil.go deleted file mode 100644 index 55aa2bb9bae69..0000000000000 --- a/plugins/inputs/suricata/suricata_testutil.go +++ /dev/null @@ -1,38 +0,0 @@ -package suricata - -import ( - "bytes" - "sync" -) - -// A thread-safe Buffer wrapper to enable concurrent access to log output. 
-type buffer struct { - b bytes.Buffer - m sync.Mutex -} - -func (b *buffer) Read(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - return b.b.Read(p) -} -func (b *buffer) Write(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - return b.b.Write(p) -} -func (b *buffer) String() string { - b.m.Lock() - defer b.m.Unlock() - return b.b.String() -} -func (b *buffer) Reset() { - b.m.Lock() - defer b.m.Unlock() - b.b.Reset() -} -func (b *buffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.b.Bytes() -} diff --git a/plugins/inputs/suricata/testdata/test2.json b/plugins/inputs/suricata/testdata/test2.json new file mode 100644 index 0000000000000..edb7d245df1fd --- /dev/null +++ b/plugins/inputs/suricata/testdata/test2.json @@ -0,0 +1,21 @@ +{ + "timestamp": "2021-06-08T06:34:49.237367+0000", + "event_type": "stats", + "stats": { + "threads": { + "W#01-ens2f1": { + "detect": { + "engines": [ + { + "id": 0, + "last_reload": "2021-06-08T06:33:05.084872+0000", + "rules_loaded": 22712, + "rules_failed": 0 + } + ], + "alert": 0 + } + } + } + } +} diff --git a/plugins/inputs/suricata/testdata/test3.json b/plugins/inputs/suricata/testdata/test3.json new file mode 100644 index 0000000000000..3e8649e66a14a --- /dev/null +++ b/plugins/inputs/suricata/testdata/test3.json @@ -0,0 +1 @@ +{"timestamp":"2021-05-30T20:07:13.208777+0200","flow_id":1696236471136137,"in_iface":"s1-suricata","event_type":"alert","src_ip":"10.0.0.5","src_port":18715,"dest_ip":"179.60.192.3","dest_port":80,"proto":"TCP","alert":{"action":"allowed","gid":1,"source":{"ip":"10.0.0.5","port":18715},"target":{"ip":"179.60.192.3","port":80},"signature_id":6,"rev":0,"signature":"Corrupted HTTP body","severity": 3,"category":"Misc activity","severity":3},"flow":{"pkts_toserver":1,"pkts_toclient":0,"bytes_toserver":174,"bytes_toclient":0,"start":"2021-05-30T20:07:13.208777+0200"}} diff --git a/plugins/inputs/swap/swap.go b/plugins/inputs/swap/swap.go index eabb40a038e7d..c7c614c1ba83b 100644 --- a/plugins/inputs/swap/swap.go +++ b/plugins/inputs/swap/swap.go @@ -12,14 +12,14 @@ type SwapStats struct { ps system.PS } -func (_ *SwapStats) Description() string { +func (ss *SwapStats) Description() string { return "Read metrics about swap memory usage" } -func (_ *SwapStats) SampleConfig() string { return "" } +func (ss *SwapStats) SampleConfig() string { return "" } -func (s *SwapStats) Gather(acc telegraf.Accumulator) error { - swap, err := s.ps.SwapStat() +func (ss *SwapStats) Gather(acc telegraf.Accumulator) error { + swap, err := ss.ps.SwapStat() if err != nil { return fmt.Errorf("error getting swap memory info: %s", err) } diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go index bcc9729384282..93cd26e3343f3 100644 --- a/plugins/inputs/synproxy/synproxy_linux.go +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go index 71a223644d8ed..f12fc70656eba 100644 --- a/plugins/inputs/synproxy/synproxy_notlinux.go +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index 83d752ff16f8c..e8fbe62989055 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -1,9 +1,9 @@ +//go:build linux 
// +build linux package synproxy import ( - "io/ioutil" "os" "testing" @@ -59,6 +59,8 @@ func TestSynproxyFileInvalidHex(t *testing.T) { func TestNoSynproxyFile(t *testing.T) { tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal)) // Remove file to generate "no such file" error + // Ignore errors if file does not yet exist + //nolint:errcheck,revive os.Remove(tmpfile) k := Synproxy{ @@ -153,7 +155,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string } func makeFakeSynproxyFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "synproxy_test") + tmpfile, err := os.CreateTemp("", "synproxy_test") if err != nil { panic(err) } diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 32c5f2717b630..a821a642b0ec8 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -55,6 +55,11 @@ Syslog messages should be formatted according to ## By default best effort parsing is off. # best_effort = false + ## The RFC standard to use for message parsing + ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) + ## Must be one of "RFC5424", or "RFC3164". + # syslog_standard = "RFC5424" + ## Character to prepend to SD-PARAMs (default = "_"). ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] @@ -155,9 +160,12 @@ echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0. #### RFC3164 -RFC3164 encoded messages are not currently supported. You may see the following error if a message encoded in this format: -``` -E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] -``` +RFC3164 encoded messages are supported for UDP only, but not all vendors output valid RFC3164 messages by default + +- E.g. Cisco IOS -You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. +If you see the following error, it is due to a message encoded in this format: + ``` + E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] + ``` + You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. \ No newline at end of file diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go index 10f2ddf511d22..1764c891ad7b4 100644 --- a/plugins/inputs/syslog/commons_test.go +++ b/plugins/inputs/syslog/commons_test.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" ) @@ -29,30 +29,30 @@ type testCaseStream struct { werr int // how many errors we expect in the strict mode? 
} -func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { +func newUDPSyslogReceiver(address string, bestEffort bool, rfc syslogRFC) *Syslog { return &Syslog{ Address: address, now: func() time.Time { return defaultTime }, - BestEffort: bestEffort, - Separator: "_", + BestEffort: bestEffort, + SyslogStandard: rfc, + Separator: "_", } } -func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { - d := &internal.Duration{ - Duration: defaultReadTimeout, - } +func newTCPSyslogReceiver(address string, keepAlive *config.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { + d := config.Duration(defaultReadTimeout) s := &Syslog{ Address: address, now: func() time.Time { return defaultTime }, - Framing: f, - ReadTimeout: d, - BestEffort: bestEffort, - Separator: "_", + Framing: f, + ReadTimeout: &d, + BestEffort: bestEffort, + SyslogStandard: syslogRFC5424, + Separator: "_", } if keepAlive != nil { s.KeepAlivePeriod = keepAlive diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index d0352c6ae1c7f..7782ad968a3b1 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -2,18 +2,18 @@ package syslog import ( "crypto/tls" - "io/ioutil" "net" "os" "path/filepath" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForNonTransparent() []testCaseStream { @@ -135,11 +135,11 @@ func getTestCasesForNonTransparent() []testCaseStream { return testCases } -func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *config.Duration) { for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 10, false, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -157,12 +157,14 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan require.NoError(t, e) config.ServerName = "localhost" conn, err = tls.Dial(protocol, address, config) + require.NotNil(t, conn) + require.NoError(t, err) } else { conn, err = net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) defer conn.Close() } - require.NotNil(t, conn) - require.NoError(t, err) // Clear acc.ClearMetrics() @@ -191,11 +193,12 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan } } -func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool) { + keepAlive := (*config.Duration)(nil) for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, 
keepAlive, 0, true, framing.NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 10, true, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -244,7 +247,7 @@ func TestNonTransparentStrict_tcp(t *testing.T) { } func TestNonTransparentBestEffort_tcp(t *testing.T) { - testBestEffortNonTransparent(t, "tcp", address, false, nil) + testBestEffortNonTransparent(t, "tcp", address, false) } func TestNonTransparentStrict_tcp_tls(t *testing.T) { @@ -252,19 +255,21 @@ func TestNonTransparentStrict_tcp_tls(t *testing.T) { } func TestNonTransparentBestEffort_tcp_tls(t *testing.T) { - testBestEffortNonTransparent(t, "tcp", address, true, nil) + testBestEffortNonTransparent(t, "tcp", address, true) } func TestNonTransparentStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) + d := config.Duration(time.Minute) + testStrictNonTransparent(t, "tcp", address, true, &d) } func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: 0}) + d := config.Duration(0) + testStrictNonTransparent(t, "tcp", address, true, &d) } func TestNonTransparentStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -272,15 +277,15 @@ func TestNonTransparentStrict_unix(t *testing.T) { } func TestNonTransparentBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortNonTransparent(t, "unix", sock, false, nil) + testBestEffortNonTransparent(t, "unix", sock, false) } func TestNonTransparentStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -288,9 +293,9 @@ func TestNonTransparentStrict_unix_tls(t *testing.T) { } func TestNonTransparentBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortNonTransparent(t, "unix", sock, true, nil) + testBestEffortNonTransparent(t, "unix", sock, true) } diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 210b64dbe11c8..1c0cc024507e2 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -3,18 +3,18 @@ package syslog import ( "crypto/tls" "fmt" - "io/ioutil" "net" "os" "path/filepath" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForOctetCounting() []testCaseStream { @@ -335,7 +335,7 @@ func getTestCasesForOctetCounting() []testCaseStream { return testCases } -func 
testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *config.Duration) { for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver @@ -357,12 +357,14 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want require.NoError(t, e) config.ServerName = "localhost" conn, err = tls.Dial(protocol, address, config) + require.NotNil(t, conn) + require.NoError(t, err) } else { conn, err = net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) defer conn.Close() } - require.NotNil(t, conn) - require.NoError(t, err) // Clear acc.ClearMetrics() @@ -391,7 +393,8 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want } } -func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool) { + keepAlive := (*config.Duration)(nil) for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver @@ -444,7 +447,7 @@ func TestOctetCountingStrict_tcp(t *testing.T) { } func TestOctetCountingBestEffort_tcp(t *testing.T) { - testBestEffortOctetCounting(t, "tcp", address, false, nil) + testBestEffortOctetCounting(t, "tcp", address, false) } func TestOctetCountingStrict_tcp_tls(t *testing.T) { @@ -452,19 +455,21 @@ func TestOctetCountingStrict_tcp_tls(t *testing.T) { } func TestOctetCountingBestEffort_tcp_tls(t *testing.T) { - testBestEffortOctetCounting(t, "tcp", address, true, nil) + testBestEffortOctetCounting(t, "tcp", address, true) } func TestOctetCountingStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) + d := config.Duration(time.Minute) + testStrictOctetCounting(t, "tcp", address, true, &d) } func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: 0}) + d := config.Duration(0) + testStrictOctetCounting(t, "tcp", address, true, &d) } func TestOctetCountingStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -472,15 +477,15 @@ func TestOctetCountingStrict_unix(t *testing.T) { } func TestOctetCountingBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortOctetCounting(t, "unix", sock, false, nil) + testBestEffortOctetCounting(t, "unix", sock, false) } func TestOctetCountingStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -488,9 +493,9 @@ func TestOctetCountingStrict_unix_tls(t *testing.T) { } func TestOctetCountingBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", 
"telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortOctetCounting(t, "unix", sock, true, nil) + testBestEffortOctetCounting(t, "unix", sock, true) } diff --git a/plugins/inputs/syslog/rfc3164_test.go b/plugins/inputs/syslog/rfc3164_test.go new file mode 100644 index 0000000000000..bd192a6d92a39 --- /dev/null +++ b/plugins/inputs/syslog/rfc3164_test.go @@ -0,0 +1,123 @@ +package syslog + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func timeMustParse(value string) time.Time { + format := "Jan 2 15:04:05 2006" + t, err := time.Parse(format, value) + if err != nil { + panic(fmt.Sprintf("couldn't parse time: %v", value)) + } + return t +} + +func getTestCasesForRFC3164() []testCasePacket { + currentYear := time.Now().Year() + ts := timeMustParse(fmt.Sprintf("Dec 2 16:31:03 %d", currentYear)).UnixNano() + testCases := []testCasePacket{ + { + name: "complete", + data: []byte("<13>Dec 2 16:31:03 host app: Test"), + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "appname": "app", + "severity": "notice", + "hostname": "host", + "facility": "user", + }, + map[string]interface{}{ + "timestamp": ts, + "message": "Test", + "facility_code": 1, + "severity_code": 5, + }, + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ + "appname": "app", + "severity": "notice", + "hostname": "host", + "facility": "user", + }, + map[string]interface{}{ + "timestamp": ts, + "message": "Test", + "facility_code": 1, + "severity_code": 5, + }, + defaultTime, + ), + }, + } + + return testCases +} + +func testRFC3164(t *testing.T, protocol string, address string, bestEffort bool) { + for _, tc := range getTestCasesForRFC3164() { + t.Run(tc.name, func(t *testing.T) { + // Create receiver + receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort, syslogRFC3164) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + conn, err := net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) + + // Write + _, err = conn.Write(tc.data) + conn.Close() + if err != nil { + if err, ok := err.(*net.OpError); ok { + if err.Err.Error() == "write: message too long" { + return + } + } + } + + // Waiting ... 
+ if tc.wantStrict == nil && tc.werr || bestEffort && tc.werr { + acc.WaitError(1) + } + if tc.wantBestEffort != nil && bestEffort || tc.wantStrict != nil && !bestEffort { + acc.Wait(1) // RFC3164 mandates a syslog message per UDP packet + } + + // Compare + var got telegraf.Metric + var want telegraf.Metric + if len(acc.Metrics) > 0 { + got = acc.GetTelegrafMetrics()[0] + } + if bestEffort { + want = tc.wantBestEffort + } else { + want = tc.wantStrict + } + testutil.RequireMetricEqual(t, want, got) + }) + } +} + +func TestRFC3164BestEffort_udp(t *testing.T) { + testRFC3164(t, "udp", address, true) +} + +func TestRFC3164Strict_udp(t *testing.T) { + testRFC3164(t, "udp", address, false) +} diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 31007bad928a3..5bcb847b36ec4 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -2,17 +2,18 @@ package syslog import ( "fmt" - "io/ioutil" "net" "os" "path/filepath" + "runtime" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForRFC5426() []testCasePacket { @@ -230,7 +231,7 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) for _, tc := range getTestCasesForRFC5426() { t.Run(tc.name, func(t *testing.T) { // Create receiver - receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort) + receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort, syslogRFC5424) acc := &testutil.Accumulator{} require.NoError(t, receiver.Start(acc)) defer receiver.Stop() @@ -284,20 +285,30 @@ func TestStrict_udp(t *testing.T) { } func TestBestEffort_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") - os.Create(sock) + _, err = os.Create(sock) + require.NoError(t, err) testRFC5426(t, "unixgram", sock, true) } func TestStrict_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") - os.Create(sock) + _, err = os.Create(sock) + require.NoError(t, err) testRFC5426(t, "unixgram", sock, false) } @@ -313,10 +324,11 @@ func TestTimeIncrement_udp(t *testing.T) { // Create receiver receiver := &Syslog{ - Address: "udp://" + address, - now: getNow, - BestEffort: false, - Separator: "_", + Address: "udp://" + address, + now: getNow, + BestEffort: false, + SyslogStandard: syslogRFC5424, + Separator: "_", } acc := &testutil.Accumulator{} require.NoError(t, receiver.Start(acc)) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 6b3615a3e80ce..fc7eab1fa0828 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -7,33 +7,40 @@ import ( "net" "net/url" "os" + "path/filepath" "strings" "sync" "time" "unicode" - "github.com/influxdata/go-syslog/v2" - "github.com/influxdata/go-syslog/v2/nontransparent" - "github.com/influxdata/go-syslog/v2/octetcounting" - 
"github.com/influxdata/go-syslog/v2/rfc5424" + syslog "github.com/influxdata/go-syslog/v3" + "github.com/influxdata/go-syslog/v3/nontransparent" + "github.com/influxdata/go-syslog/v3/octetcounting" + "github.com/influxdata/go-syslog/v3/rfc3164" + "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" tlsConfig "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +type syslogRFC string + const defaultReadTimeout = time.Second * 5 const ipMaxPacketSize = 64 * 1024 +const syslogRFC3164 = "RFC3164" +const syslogRFC5424 = "RFC5424" // Syslog is a syslog plugin type Syslog struct { tlsConfig.ServerConfig Address string `toml:"server"` - KeepAlivePeriod *internal.Duration + KeepAlivePeriod *config.Duration MaxConnections int - ReadTimeout *internal.Duration + ReadTimeout *config.Duration Framing framing.Framing + SyslogStandard syslogRFC Trailer nontransparent.TrailerType BestEffort bool Separator string `toml:"sdparam_separator"` @@ -95,6 +102,11 @@ var sampleConfig = ` ## By default best effort parsing is off. # best_effort = false + ## The RFC standard to use for message parsing + ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) + ## Must be one of "RFC5424", or "RFC3164". + # syslog_standard = "RFC5424" + ## Character to prepend to SD-PARAMs (default = "_"). ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] @@ -139,6 +151,8 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error { } if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" { + // Accept success and failure in case the file does not exist + //nolint:errcheck,revive os.Remove(s.Address) } @@ -181,6 +195,8 @@ func (s *Syslog) Stop() { defer s.mu.Unlock() if s.Closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.Close() } s.wg.Wait() @@ -195,7 +211,10 @@ func getAddressParts(a string) (string, string, error) { return "", "", fmt.Errorf("missing protocol within address '%s'", a) } - u, _ := url.Parse(a) + u, err := url.Parse(filepath.ToSlash(a)) //convert backslashes to slashes (to make Windows path a valid URL) + if err != nil { + return "", "", fmt.Errorf("could not parse address '%s': %v", a, err) + } switch u.Scheme { case "unix", "unixpacket", "unixgram": return parts[0], parts[1], nil @@ -219,10 +238,15 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { defer s.wg.Done() b := make([]byte, ipMaxPacketSize) var p syslog.Machine - if s.BestEffort { - p = rfc5424.NewParser(rfc5424.WithBestEffort()) - } else { + switch { + case !s.BestEffort && s.SyslogStandard == syslogRFC5424: p = rfc5424.NewParser() + case s.BestEffort && s.SyslogStandard == syslogRFC5424: + p = rfc5424.NewParser(rfc5424.WithBestEffort()) + case !s.BestEffort && s.SyslogStandard == syslogRFC3164: + p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{})) + case s.BestEffort && s.SyslogStandard == syslogRFC3164: + p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{}), rfc3164.WithBestEffort()) } for { n, _, err := s.udpListener.ReadFrom(b) @@ -264,7 +288,9 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) { s.connectionsMu.Lock() if s.MaxConnections > 0 && 
len(s.connections) >= s.MaxConnections { s.connectionsMu.Unlock() - conn.Close() + if err := conn.Close(); err != nil { + acc.AddError(err) + } continue } s.connections[conn.RemoteAddr().String()] = conn @@ -279,7 +305,9 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) { s.connectionsMu.Lock() for _, c := range s.connections { - c.Close() + if err := c.Close(); err != nil { + acc.AddError(err) + } } s.connectionsMu.Unlock() } @@ -293,6 +321,8 @@ func (s *Syslog) removeConnection(c net.Conn) { func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { defer func() { s.removeConnection(conn) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() }() @@ -300,8 +330,10 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { emit := func(r *syslog.Result) { s.store(*r, acc) - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 { + if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil { + acc.AddError(fmt.Errorf("setting read deadline failed: %v", err)) + } } } @@ -325,8 +357,10 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { p.Parse(conn) - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 { + if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil { + acc.AddError(fmt.Errorf("setting read deadline failed: %v", err)) + } } } @@ -335,13 +369,13 @@ func (s *Syslog) setKeepAlive(c *net.TCPConn) error { return nil } - if s.KeepAlivePeriod.Duration == 0 { + if *s.KeepAlivePeriod == 0 { return c.SetKeepAlive(false) } if err := c.SetKeepAlive(true); err != nil { return err } - return c.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) + return c.SetKeepAlivePeriod(time.Duration(*s.KeepAlivePeriod)) } func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) { @@ -360,58 +394,70 @@ func tags(msg syslog.Message) map[string]string { ts["severity"] = *msg.SeverityShortLevel() ts["facility"] = *msg.FacilityLevel() - if msg.Hostname() != nil { - ts["hostname"] = *msg.Hostname() - } - - if msg.Appname() != nil { - ts["appname"] = *msg.Appname() + switch m := msg.(type) { + case *rfc5424.SyslogMessage: + populateCommonTags(&m.Base, ts) + case *rfc3164.SyslogMessage: + populateCommonTags(&m.Base, ts) } - return ts } func fields(msg syslog.Message, s *Syslog) map[string]interface{} { - // Not checking assuming a minimally valid message - flds := map[string]interface{}{ - "version": msg.Version(), + flds := map[string]interface{}{} + + switch m := msg.(type) { + case *rfc5424.SyslogMessage: + populateCommonFields(&m.Base, flds) + // Not checking assuming a minimally valid message + flds["version"] = m.Version + + if m.StructuredData != nil { + for sdid, sdparams := range *m.StructuredData { + if len(sdparams) == 0 { + // When SD-ID does not have params we indicate its presence with a bool + flds[sdid] = true + continue + } + for name, value := range sdparams { + // Using whitespace as separator since it is not allowed by the grammar within SDID + flds[sdid+s.Separator+name] = value + } + } + } + case *rfc3164.SyslogMessage: + populateCommonFields(&m.Base, flds) } - flds["severity_code"] = int(*msg.Severity()) - flds["facility_code"] = int(*msg.Facility()) - if 
msg.Timestamp() != nil { - flds["timestamp"] = (*msg.Timestamp()).UnixNano() - } + return flds +} - if msg.ProcID() != nil { - flds["procid"] = *msg.ProcID() +func populateCommonFields(msg *syslog.Base, flds map[string]interface{}) { + flds["facility_code"] = int(*msg.Facility) + flds["severity_code"] = int(*msg.Severity) + if msg.Timestamp != nil { + flds["timestamp"] = (*msg.Timestamp).UnixNano() } - - if msg.MsgID() != nil { - flds["msgid"] = *msg.MsgID() + if msg.ProcID != nil { + flds["procid"] = *msg.ProcID } - - if msg.Message() != nil { - flds["message"] = strings.TrimRightFunc(*msg.Message(), func(r rune) bool { + if msg.MsgID != nil { + flds["msgid"] = *msg.MsgID + } + if msg.Message != nil { + flds["message"] = strings.TrimRightFunc(*msg.Message, func(r rune) bool { return unicode.IsSpace(r) }) } +} - if msg.StructuredData() != nil { - for sdid, sdparams := range *msg.StructuredData() { - if len(sdparams) == 0 { - // When SD-ID does not have params we indicate its presence with a bool - flds[sdid] = true - continue - } - for name, value := range sdparams { - // Using whitespace as separator since it is not allowed by the grammar within SDID - flds[sdid+s.Separator+name] = value - } - } +func populateCommonTags(msg *syslog.Base, ts map[string]string) { + if msg.Hostname != nil { + ts["hostname"] = *msg.Hostname + } + if msg.Appname != nil { + ts["appname"] = *msg.Appname } - - return flds } type unixCloser struct { @@ -421,7 +467,9 @@ type unixCloser struct { func (uc unixCloser) Close() error { err := uc.closer.Close() - os.Remove(uc.path) // ignore error + // Accept success and failure in case the file does not exist + //nolint:errcheck,revive + os.Remove(uc.path) return err } @@ -439,16 +487,16 @@ func getNanoNow() time.Time { } func init() { + defaultTimeout := config.Duration(defaultReadTimeout) inputs.Add("syslog", func() telegraf.Input { return &Syslog{ - Address: ":6514", - now: getNanoNow, - ReadTimeout: &internal.Duration{ - Duration: defaultReadTimeout, - }, - Framing: framing.OctetCounting, - Trailer: nontransparent.LF, - Separator: "_", + Address: ":6514", + now: getNanoNow, + ReadTimeout: &defaultTimeout, + Framing: framing.OctetCounting, + SyslogStandard: syslogRFC5424, + Trailer: nontransparent.LF, + Separator: "_", } }) } diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index 66568380e95a6..00146fde9cd26 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -1,15 +1,16 @@ package syslog import ( - "io/ioutil" "os" "path/filepath" + "runtime" "strings" "testing" "time" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const ( @@ -44,18 +45,21 @@ func TestAddress(t *testing.T) { require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") require.Error(t, err) - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") defer os.RemoveAll(tmpdir) require.NoError(t, err) sock := filepath.Join(tmpdir, "syslog.TestAddress.sock") - rec = &Syslog{ - Address: "unixgram://" + sock, + if runtime.GOOS != "windows" { + // Skipping on Windows, as unixgram sockets are not supported + rec = &Syslog{ + Address: "unixgram://" + sock, + } + err = rec.Start(&testutil.Accumulator{}) + require.NoError(t, err) + require.Equal(t, sock, rec.Address) + rec.Stop() } - err = rec.Start(&testutil.Accumulator{}) - require.NoError(t, err) - require.Equal(t, sock, rec.Address) - 
rec.Stop() // Default port is 6514 rec = &Syslog{ diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 9f530024b52d8..7e69ff41ccdf2 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat @@ -16,6 +17,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -33,7 +35,7 @@ type Sysstat struct { Sadc string `toml:"sadc_path"` // Force the execution time of sadc - SadcInterval internal.Duration `toml:"sadc_interval"` + SadcInterval config.Duration `toml:"sadc_interval"` // Sadf represents the path to the sadf cmd. Sadf string `toml:"sadf_path"` @@ -135,9 +137,9 @@ func (*Sysstat) SampleConfig() string { } func (s *Sysstat) Gather(acc telegraf.Accumulator) error { - if s.SadcInterval.Duration != 0 { + if time.Duration(s.SadcInterval) != 0 { // Collect interval is calculated as interval - parseInterval - s.interval = int(s.SadcInterval.Duration.Seconds()) + parseInterval + s.interval = int(time.Duration(s.SadcInterval).Seconds()) + parseInterval } if s.interval == 0 { @@ -273,7 +275,6 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e tags[k] = v } } - } } @@ -285,7 +286,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e tags: make(map[string]string), } } - g, _ := m[device] + g := m[device] if len(g.tags) == 0 { for k, v := range tags { g.tags[k] = v @@ -299,7 +300,6 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e } acc.AddFields(measurement, fields, tags, ts) } - } if s.Group { for _, v := range m { diff --git a/plugins/inputs/sysstat/sysstat_interval_test.go b/plugins/inputs/sysstat/sysstat_interval_test.go index 972eb9af936de..f714ec10b1c36 100644 --- a/plugins/inputs/sysstat/sysstat_interval_test.go +++ b/plugins/inputs/sysstat/sysstat_interval_test.go @@ -1,5 +1,5 @@ -// +build !race -// +build linux +//go:build !race && linux +// +build !race,linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_notlinux.go b/plugins/inputs/sysstat/sysstat_notlinux.go index e97e71e78280c..6b5dd6fcf18cb 100644 --- a/plugins/inputs/sysstat/sysstat_notlinux.go +++ b/plugins/inputs/sysstat/sysstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 4aecfaacc2a15..64b596bb329ba 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat @@ -10,6 +11,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var s = Sysstat{ @@ -303,7 +305,8 @@ dell-xps 5 2016-03-25 16:18:10 UTC sdb %util 0.30 switch path.Base(cmd) { case "sadf": - fmt.Fprint(os.Stdout, mockData[args[3]]) + _, err := fmt.Fprint(os.Stdout, mockData[args[3]]) + require.NoError(t, err) default: } // some code here to check arguments perhaps? 
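The sysstat hunk above is one instance of a conversion that recurs throughout this changeset: `internal.Duration`, a struct wrapping a `time.Duration` field, is replaced by `config.Duration`, a named `time.Duration`, so call sites convert the value explicitly instead of reading a `.Duration` field. Below is a minimal sketch of the pattern, assuming only that `config.Duration` is a named `time.Duration`; the `Duration` type and `sadcInterval` variable are illustrative stand-ins, not names from this diff.

```go
package main

import (
	"fmt"
	"time"
)

// Duration mirrors config.Duration: a named time.Duration rather than
// the old internal.Duration struct with an embedded .Duration field.
type Duration time.Duration

func main() {
	// Illustrative stand-in for an option such as Sysstat.SadcInterval.
	sadcInterval := Duration(30 * time.Second)

	// Old call site: sadcInterval.Duration.Seconds()
	// New call site: convert the named type back to time.Duration first.
	if time.Duration(sadcInterval) != 0 {
		fmt.Printf("sadc interval: %d seconds\n", int(time.Duration(sadcInterval).Seconds()))
	}
}
```

The same mechanical change appears again below in multiline.go, tail.go, and tengine.go.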
diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index b3cf2c1707f5d..e1bd4f84b48e7 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -37,7 +37,7 @@ func (m *MockPS) LoadAvg() (*load.AvgStat, error) { return r0, r1 } -func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { +func (m *MockPS) CPUTimes(_, _ bool) ([]cpu.TimesStat, error) { ret := m.Called() r0 := ret.Get(0).([]cpu.TimesStat) @@ -74,7 +74,7 @@ func (m *MockPS) NetProto() ([]net.ProtoCountersStat, error) { return r0, r1 } -func (m *MockPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { +func (m *MockPS) DiskIO(_ []string) (map[string]disk.IOCountersStat, error) { ret := m.Called() r0 := ret.Get(0).(map[string]disk.IOCountersStat) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 824dbe446d5be..d835d02633d02 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -5,7 +5,6 @@ import ( "path/filepath" "strings" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/shirou/gopsutil/cpu" @@ -34,13 +33,6 @@ type PSDiskDeps interface { PSDiskUsage(path string) (*disk.UsageStat, error) } -func add(acc telegraf.Accumulator, - name string, val float64, tags map[string]string) { - if val >= 0 { - acc.AddFields(name, map[string]interface{}{"value": val}, tags) - } -} - func NewSystemPS() *SystemPS { return &SystemPS{&SystemPSDisk{}} } @@ -155,7 +147,7 @@ func (s *SystemPS) NetConnections() ([]net.ConnectionStat, error) { func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { m, err := disk.IOCounters(names...) - if err == internal.NotImplementedError { + if err == internal.ErrorNotImplemented { return nil, nil } diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 32747cca20314..ded0e8ba18a22 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -86,6 +86,8 @@ func formatUptime(uptime uint64) string { if days > 1 { s = "s" } + // This will always succeed, so skip checking the error + //nolint:errcheck,revive fmt.Fprintf(w, "%d day%s, ", days, s) } @@ -94,8 +96,12 @@ func formatUptime(uptime uint64) string { hours %= 24 minutes %= 60 + // This will always succeed, so skip checking the error + //nolint:errcheck,revive fmt.Fprintf(w, "%2d:%02d", hours, minutes) + // This will always succeed, so skip checking the error + //nolint:errcheck,revive w.Flush() return buf.String() } diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index fc8306dee2da9..f9d47d7df1252 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -1,7 +1,7 @@ # systemd Units Input Plugin The systemd_units plugin gathers systemd unit status on Linux. It relies on -`systemctl list-units --all --type=service` to collect data on service status. +`systemctl list-units [PATTERN] --all --plain --type=service` to collect data on service status. The results are tagged with the unit name and provide enumerated fields for loaded, active and running fields, indicating the unit health. @@ -22,6 +22,13 @@ see `systemctl list-units --all --type help` for possible options. ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. 
all), other possible + ## values are valid patterns for systemctl, e.g. "a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ``` ### Metrics diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index 64caf03d007f3..e41c64752977e 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -9,25 +9,27 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) // SystemdUnits is a telegraf plugin to gather systemd unit status type SystemdUnits struct { - Timeout internal.Duration + Timeout config.Duration UnitType string `toml:"unittype"` + Pattern string `toml:"pattern"` systemctl systemctl } -type systemctl func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) +type systemctl func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) const measurement = "systemd_units" // Below are mappings of systemd state tables as defined in // https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c // Duplicate strings are removed from this list. -var load_map = map[string]int{ +var loadMap = map[string]int{ "loaded": 0, "stub": 1, "not-found": 2, @@ -37,7 +39,7 @@ var load_map = map[string]int{ "masked": 6, } -var active_map = map[string]int{ +var activeMap = map[string]int{ "active": 0, "reloading": 1, "inactive": 2, @@ -46,7 +48,7 @@ var active_map = map[string]int{ "deactivating": 5, } -var sub_map = map[string]int{ +var subMap = map[string]int{ // service_state_table, offset 0x0000 "running": 0x0000, "dead": 0x0001, @@ -112,8 +114,9 @@ var sub_map = map[string]int{ } var ( - defaultTimeout = internal.Duration{Duration: time.Second} + defaultTimeout = config.Duration(time.Second) defaultUnitType = "service" + defaultPattern = "" ) // Description returns a short description of the plugin @@ -131,12 +134,19 @@ func (s *SystemdUnits) SampleConfig() string { ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid patterns for systemctl, e.g.
"a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ` } // Gather parses systemctl outputs and adds counters to the Accumulator func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { - out, err := s.systemctl(s.Timeout, s.UnitType) + out, err := s.systemctl(s.Timeout, s.UnitType, s.Pattern) if err != nil { return err } @@ -162,27 +172,27 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { } var ( - load_code int - active_code int - sub_code int - ok bool + loadCode int + activeCode int + subCode int + ok bool ) - if load_code, ok = load_map[load]; !ok { + if loadCode, ok = loadMap[load]; !ok { acc.AddError(fmt.Errorf("Error parsing field 'load', value not in map: %s", load)) continue } - if active_code, ok = active_map[active]; !ok { + if activeCode, ok = activeMap[active]; !ok { acc.AddError(fmt.Errorf("Error parsing field 'active', value not in map: %s", active)) continue } - if sub_code, ok = sub_map[sub]; !ok { + if subCode, ok = subMap[sub]; !ok { acc.AddError(fmt.Errorf("Error parsing field 'sub', value not in map: %s", sub)) continue } fields := map[string]interface{}{ - "load_code": load_code, - "active_code": active_code, - "sub_code": sub_code, + "load_code": loadCode, + "active_code": activeCode, + "sub_code": subCode, } acc.AddFields(measurement, fields, tags) @@ -191,22 +201,32 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { return nil } -func setSystemctl(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { +func setSystemctl(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { // is systemctl available ? systemctlPath, err := exec.LookPath("systemctl") if err != nil { return nil, err } - - cmd := exec.Command(systemctlPath, "list-units", "--all", fmt.Sprintf("--type=%s", UnitType), "--no-legend") - + // build parameters for systemctl call + params := []string{"list-units"} + // create patterns parameters if provided in config + if pattern != "" { + psplit := strings.SplitN(pattern, " ", -1) + for v := range psplit { + params = append(params, psplit[v]) + } + } + params = append(params, "--all", "--plain") + // add type as configured in config + params = append(params, fmt.Sprintf("--type=%s", unitType)) + params = append(params, "--no-legend") + cmd := exec.Command(systemctlPath, params...) 
var out bytes.Buffer cmd.Stdout = &out - err = internal.RunTimeout(cmd, Timeout.Duration) + err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { - return &out, fmt.Errorf("error running systemctl list-units --all --type=%s --no-legend: %s", UnitType, err) + return &out, fmt.Errorf("error running systemctl %s: %s", strings.Join(params, " "), err) } - return &out, nil } @@ -216,6 +236,7 @@ func init() { systemctl: setSystemctl, Timeout: defaultTimeout, UnitType: defaultUnitType, + Pattern: defaultPattern, } }) } diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go index f45922bb91af0..05070c6ff5e94 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux_test.go +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -6,7 +6,7 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -73,13 +73,13 @@ func TestSystemdUnits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - systemd_units := &SystemdUnits{ - systemctl: func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { + systemdUnits := &SystemdUnits{ + systemctl: func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.line), nil }, } acc := new(testutil.Accumulator) - err := acc.GatherError(systemd_units.Gather) + err := acc.GatherError(systemdUnits.Gather) if !reflect.DeepEqual(tt.err, err) { t.Errorf("%s: expected error '%#v' got '%#v'", tt.name, tt.err, err) } diff --git a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go index f53cea3de6eba..32f5b97cc37ec 100644 --- a/plugins/inputs/systemd_units/systemd_units_notlinux.go +++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package systemd_units diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index 7f5315038a2ea..abdf0878aff56 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -29,7 +29,8 @@ The plugin expects messages in one of the ## "/var/log/**.log" -> recursively find all .log files in /var/log ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log ## "/var/log/apache.log" -> just tail the apache log file - ## + ## "/var/log/log[!1-2]*" -> tail files that do not start with "log1" or "log2" + ## "/var/log/log[^1-2]*" -> identical behavior to the pattern above ## See https://github.com/gobwas/glob for more examples ## files = ["/var/mymetrics.out"] @@ -63,6 +64,9 @@ The plugin expects messages in one of the ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+ # path_tag = "path" + ## multiline parser/codec ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html #[inputs.tail.multiline] diff --git a/plugins/inputs/tail/multiline.go b/plugins/inputs/tail/multiline.go index 7a254c1bf9676..7ea2e460b88d6 100644 --- a/plugins/inputs/tail/multiline.go +++ b/plugins/inputs/tail/multiline.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) // Indicates relation to the multiline event: previous or next @@ -23,7 +23,7 @@ type MultilineConfig struct { Pattern string MatchWhichLine MultilineMatchWhichLine `toml:"match_which_line"` InvertMatch bool - Timeout *internal.Duration + Timeout *config.Duration } const ( @@ -43,8 +43,9 @@ func (m *MultilineConfig) NewMultiline() (*Multiline, error) { if r, err = regexp.Compile(m.Pattern); err != nil { return nil, err } - if m.Timeout == nil || m.Timeout.Duration.Nanoseconds() == int64(0) { - m.Timeout = &internal.Duration{Duration: 5 * time.Second} + if m.Timeout == nil || time.Duration(*m.Timeout).Nanoseconds() == int64(0) { + d := config.Duration(5 * time.Second) + m.Timeout = &d } } @@ -60,6 +61,8 @@ func (m *Multiline) IsEnabled() bool { func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string { if m.matchString(text) { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive buffer.WriteString(text) return "" } @@ -67,12 +70,16 @@ func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string { if m.config.MatchWhichLine == Previous { previousText := buffer.String() buffer.Reset() - buffer.WriteString(text) + if _, err := buffer.WriteString(text); err != nil { + return "" + } text = previousText } else { // Next if buffer.Len() > 0 { - buffer.WriteString(text) + if _, err := buffer.WriteString(text); err != nil { + return "" + } text = buffer.String() buffer.Reset() } diff --git a/plugins/inputs/tail/multiline_test.go b/plugins/inputs/tail/multiline_test.go index 6db50dc048b99..26a7e80292772 100644 --- a/plugins/inputs/tail/multiline_test.go +++ b/plugins/inputs/tail/multiline_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/stretchr/testify/assert" ) @@ -32,20 +32,20 @@ func TestMultilineConfigError(t *testing.T) { } func TestMultilineConfigTimeoutSpecified(t *testing.T) { - duration, _ := time.ParseDuration("10s") + duration := config.Duration(10 * time.Second) c := &MultilineConfig{ Pattern: ".*", MatchWhichLine: Previous, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } m, err := c.NewMultiline() assert.NoError(t, err, "Configuration was OK.") - assert.Equal(t, duration, m.config.Timeout.Duration) + assert.Equal(t, duration, *m.config.Timeout) } func TestMultilineConfigDefaultTimeout(t *testing.T) { - duration, _ := time.ParseDuration("5s") + duration := config.Duration(5 * time.Second) c := &MultilineConfig{ Pattern: ".*", MatchWhichLine: Previous, @@ -53,7 +53,7 @@ func TestMultilineConfigDefaultTimeout(t *testing.T) { m, err := c.NewMultiline() assert.NoError(t, err, "Configuration was OK.") - assert.Equal(t, duration, m.config.Timeout.Duration) + assert.Equal(t, duration, *m.config.Timeout) } func TestMultilineIsEnabled(t *testing.T) { @@ -103,7 +103,8 @@ func TestMultilineFlush(t *testing.T) { m, err := c.NewMultiline() assert.NoError(t, err, "Configuration was OK.") var buffer bytes.Buffer - 
buffer.WriteString("foo") + _, err = buffer.WriteString("foo") + assert.NoError(t, err) text := m.Flush(&buffer) @@ -205,31 +206,30 @@ func TestMultiLineMatchStringWithInvertTrue(t *testing.T) { func TestMultilineWhat(t *testing.T) { var w1 MultilineMatchWhichLine - w1.UnmarshalTOML([]byte(`"previous"`)) + assert.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`))) assert.Equal(t, Previous, w1) var w2 MultilineMatchWhichLine - w2.UnmarshalTOML([]byte(`previous`)) + assert.NoError(t, w2.UnmarshalTOML([]byte(`previous`))) assert.Equal(t, Previous, w2) var w3 MultilineMatchWhichLine - w3.UnmarshalTOML([]byte(`'previous'`)) + assert.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`))) assert.Equal(t, Previous, w3) var w4 MultilineMatchWhichLine - w4.UnmarshalTOML([]byte(`"next"`)) + assert.NoError(t, w4.UnmarshalTOML([]byte(`"next"`))) assert.Equal(t, Next, w4) var w5 MultilineMatchWhichLine - w5.UnmarshalTOML([]byte(`next`)) + assert.NoError(t, w5.UnmarshalTOML([]byte(`next`))) assert.Equal(t, Next, w5) var w6 MultilineMatchWhichLine - w6.UnmarshalTOML([]byte(`'next'`)) + assert.NoError(t, w6.UnmarshalTOML([]byte(`'next'`))) assert.Equal(t, Next, w6) var w7 MultilineMatchWhichLine - err := w7.UnmarshalTOML([]byte(`nope`)) + assert.Error(t, w7.UnmarshalTOML([]byte(`nope`))) assert.Equal(t, MultilineMatchWhichLine(-1), w7) - assert.Error(t, err) } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index fdb5b40cc3abd..d5bda84732ad8 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package tail @@ -22,8 +23,7 @@ import ( ) const ( - defaultWatchMethod = "inotify" - defaultMaxUndeliveredLines = 1000 + defaultWatchMethod = "inotify" ) var ( @@ -41,6 +41,7 @@ type Tail struct { WatchMethod string `toml:"watch_method"` MaxUndeliveredLines int `toml:"max_undelivered_lines"` CharacterEncoding string `toml:"character_encoding"` + PathTag string `toml:"path_tag"` Log telegraf.Logger `toml:"-"` tailers map[string]*tail.Tail @@ -71,6 +72,7 @@ func NewTail() *Tail { FromBeginning: false, MaxUndeliveredLines: 1000, offsets: offsetsCopy, + PathTag: "path", } } @@ -81,7 +83,8 @@ const sampleConfig = ` ## "/var/log/**.log" -> recursively find all .log files in /var/log ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log ## "/var/log/apache.log" -> just tail the apache log file - ## + ## "/var/log/log[!1-2]*" -> tail files that do not start with "log1" or "log2" + ## "/var/log/log[^1-2]*" -> identical behavior to the pattern above ## See https://github.com/gobwas/glob for more examples ## files = ["/var/mymetrics.out"] @@ -115,6 +118,9 @@ const sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. + # path_tag = "path" + ## multiline parser/codec ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html #[inputs.tail.multiline] @@ -156,7 +162,7 @@ func (t *Tail) Init() error { return err } -func (t *Tail) Gather(acc telegraf.Accumulator) error { +func (t *Tail) Gather(_ telegraf.Accumulator) error { return t.tailNewFiles(true) } @@ -290,17 +296,17 @@ func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.M // line from the file.
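+		// the first line goes through Parse; later lines use ParseLine, which handles one line at a time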
if firstLine { return parser.Parse([]byte(line)) - } else { - m, err := parser.ParseLine(line) - if err != nil { - return nil, err - } + } - if m != nil { - return []telegraf.Metric{m}, nil - } - return []telegraf.Metric{}, nil + m, err := parser.ParseLine(line) + if err != nil { + return nil, err } + + if m != nil { + return []telegraf.Metric{m}, nil + } + return []telegraf.Metric{}, nil default: return parser.Parse([]byte(line)) } @@ -320,7 +326,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { // The multiline mode requires a timer in order to flush the multiline buffer // if no new lines are incoming. if t.multiline.IsEnabled() { - timer = time.NewTimer(t.MultilineConfig.Timeout.Duration) + timer = time.NewTimer(time.Duration(*t.MultilineConfig.Timeout)) timeout = timer.C } @@ -332,7 +338,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { line = nil if timer != nil { - timer.Reset(t.MultilineConfig.Timeout.Duration) + timer.Reset(time.Duration(*t.MultilineConfig.Timeout)) } select { @@ -380,8 +386,10 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { } firstLine = false - for _, metric := range metrics { - metric.AddTag("path", tailer.Filename) + if t.PathTag != "" { + for _, metric := range metrics { + metric.AddTag(t.PathTag, tailer.Filename) + } } // try writing out metric first without blocking diff --git a/plugins/inputs/tail/tail_solaris.go b/plugins/inputs/tail/tail_solaris.go index 802088da28248..093dd16a06c23 100644 --- a/plugins/inputs/tail/tail_solaris.go +++ b/plugins/inputs/tail/tail_solaris.go @@ -1,5 +1,6 @@ // Skipping plugin on Solaris due to fsnotify support // +//go:build solaris // +build solaris package tail diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 5669fbf2e6ea7..1098a10edbff5 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -2,27 +2,53 @@ package tail import ( "bytes" - "io/ioutil" "log" "os" + "path/filepath" "runtime" - "strings" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) +var ( + testdataDir = getTestdataDir() +) + +func NewTestTail() *Tail { + offsetsMutex.Lock() + offsetsCopy := make(map[string]int64, len(offsets)) + for k, v := range offsets { + offsetsCopy[k] = v + } + offsetsMutex.Unlock() + watchMethod := defaultWatchMethod + + if runtime.GOOS == "windows" { + watchMethod = "poll" + } + + return &Tail{ + FromBeginning: false, + MaxUndeliveredLines: 1000, + offsets: offsetsCopy, + WatchMethod: watchMethod, + PathTag: "path", + } +} + func TestTailBadLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -33,12 +59,12 @@ func TestTailBadLine(t *testing.T) { _, err = tmpfile.WriteString("cpu usage_idle=100\n") require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) buf := &bytes.Buffer{} log.SetOutput(buf) - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} 
tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -58,15 +84,15 @@ func TestTailBadLine(t *testing.T) { assert.Contains(t, buf.String(), "Malformed log line") } -func TestTailDosLineendings(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") +func TestTailDosLineEndings(t *testing.T) { + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -92,19 +118,18 @@ func TestTailDosLineendings(t *testing.T) { } func TestGrokParseLogFilesWithMultiline(t *testing.T) { - thisdir := getCurrentDir() //we make sure the timeout won't kick in - duration, _ := time.ParseDuration("100s") - + d, _ := time.ParseDuration("100s") + duration := config.Duration(d) tt := NewTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{thisdir + "testdata/test_multiline.log"} + tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} tt.MultilineConfig = MultilineConfig{ Pattern: `^[^\[]`, MatchWhichLine: Previous, InvertMatch: false, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } tt.SetParserFunc(createGrokParser) @@ -117,7 +142,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { acc.Wait(3) - expectedPath := thisdir + "testdata/test_multiline.log" + expectedPath := filepath.Join(testdataDir, "test_multiline.log") acc.AssertContainsTaggedFields(t, "tail_grok", map[string]interface{}{ "message": "HelloExample: This is debug", @@ -147,19 +172,20 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { } func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) - // This seems neccessary in order to get the test to read the following lines. + // This seems necessary in order to get the test to read the following lines. 
_, err = tmpfile.WriteString("[04/Jun/2016:12:41:48 +0100] INFO HelloExample: This is fluff\r\n") require.NoError(t, err) require.NoError(t, tmpfile.Sync()) // set tight timeout for tests - duration := 10 * time.Millisecond - + d := 10 * time.Millisecond + duration := config.Duration(d) tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -167,7 +193,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { Pattern: `^[^\[]`, MatchWhichLine: Previous, InvertMatch: false, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } tt.SetParserFunc(createGrokParser) @@ -209,19 +235,18 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { } func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *testing.T) { - thisdir := getCurrentDir() //we make sure the timeout won't kick in - duration := 100 * time.Second + duration := config.Duration(100 * time.Second) - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{thisdir + "testdata/test_multiline.log"} + tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} tt.MultilineConfig = MultilineConfig{ Pattern: `^[^\[]`, MatchWhichLine: Previous, InvertMatch: false, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } tt.SetParserFunc(createGrokParser) @@ -236,7 +261,7 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test tt.Stop() acc.Wait(4) - expectedPath := thisdir + "testdata/test_multiline.log" + expectedPath := filepath.Join(testdataDir, "test_multiline.log") acc.AssertContainsTaggedFields(t, "tail_grok", map[string]interface{}{ "message": "HelloExample: This is warn", @@ -251,7 +276,7 @@ func createGrokParser() (parsers.Parser, error) { grokConfig := &parsers.Config{ MetricName: "tail_grok", GrokPatterns: []string{"%{TEST_LOG_MULTILINE}"}, - GrokCustomPatternFiles: []string{getCurrentDir() + "testdata/test-patterns"}, + GrokCustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, DataFormat: "grok", } parser, err := parsers.NewParser(grokConfig) @@ -260,7 +285,7 @@ func createGrokParser() (parsers.Parser, error) { // The csv parser should only parse the header line once per file. 
func TestCSVHeadersParsedOnce(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -270,9 +295,9 @@ cpu,42 cpu,42 `) require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) - plugin := NewTail() + plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} @@ -319,7 +344,7 @@ cpu,42 // Ensure that the first line can produce multiple metrics (#6138) func TestMultipleMetricsOnFirstLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -327,12 +352,13 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { [{"time_idle": 42}, {"time_idle": 42}] `) require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) - plugin := NewTail() + plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} + plugin.PathTag = "customPathTagMyFile" plugin.SetParserFunc(func() (parsers.Parser, error) { return json.New( &json.Config{ @@ -355,7 +381,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { expected := []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "customPathTagMyFile": tmpfile.Name(), }, map[string]interface{}{ "time_idle": 42.0, @@ -363,7 +389,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { time.Unix(0, 0)), testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "customPathTagMyFile": tmpfile.Name(), }, map[string]interface{}{ "time_idle": 42.0, @@ -374,11 +400,6 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { testutil.IgnoreTime()) } -func getCurrentDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "tail_test.go", "", 1) -} - func TestCharacterEncoding(t *testing.T) { full := []telegraf.Metric{ testutil.MustMetric("cpu", @@ -428,89 +449,86 @@ func TestCharacterEncoding(t *testing.T) { ), } + watchMethod := defaultWatchMethod + if runtime.GOOS == "windows" { + watchMethod = "poll" + } + tests := []struct { - name string - plugin *Tail - offset int64 - expected []telegraf.Metric + name string + testfiles string + fromBeginning bool + characterEncoding string + offset int64 + expected []telegraf.Metric }{ { - name: "utf-8", - plugin: &Tail{ - Files: []string{"testdata/cpu-utf-8.influx"}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - expected: full, + name: "utf-8", + testfiles: "cpu-utf-8.influx", + fromBeginning: true, + characterEncoding: "utf-8", + expected: full, }, { - name: "utf-8 seek", - plugin: &Tail{ - Files: []string{"testdata/cpu-utf-8.influx"}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - offset: 0x33, - expected: full[1:], + name: "utf-8 seek", + testfiles: "cpu-utf-8.influx", + characterEncoding: "utf-8", + offset: 0x33, + expected: full[1:], }, { - name: "utf-16le", - plugin: &Tail{ - Files: []string{"testdata/cpu-utf-16le.influx"}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - expected: full, + name: "utf-16le", + testfiles: "cpu-utf-16le.influx", + fromBeginning: true, + characterEncoding: "utf-16le", + expected: full, }, { - name: "utf-16le seek", - plugin: &Tail{ - Files: 
[]string{"testdata/cpu-utf-16le.influx"}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - offset: 0x68, - expected: full[1:], + name: "utf-16le seek", + testfiles: "cpu-utf-16le.influx", + characterEncoding: "utf-16le", + offset: 0x68, + expected: full[1:], }, { - name: "utf-16be", - plugin: &Tail{ - Files: []string{"testdata/cpu-utf-16be.influx"}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16be", - }, - expected: full, + name: "utf-16be", + testfiles: "cpu-utf-16be.influx", + fromBeginning: true, + characterEncoding: "utf-16be", + expected: full, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.plugin.SetParserFunc(func() (parsers.Parser, error) { + + plugin := &Tail{ + Files: []string{filepath.Join(testdataDir, tt.testfiles)}, + FromBeginning: tt.fromBeginning, + MaxUndeliveredLines: 1000, + Log: testutil.Logger{}, + CharacterEncoding: tt.characterEncoding, + WatchMethod: watchMethod, + } + + plugin.SetParserFunc(func() (parsers.Parser, error) { handler := influx.NewMetricHandler() return influx.NewParser(handler), nil }) if tt.offset != 0 { - tt.plugin.offsets = map[string]int64{ - tt.plugin.Files[0]: tt.offset, + plugin.offsets = map[string]int64{ + plugin.Files[0]: tt.offset, } } - err := tt.plugin.Init() + err := plugin.Init() require.NoError(t, err) var acc testutil.Accumulator - err = tt.plugin.Start(&acc) + err = plugin.Start(&acc) require.NoError(t, err) acc.Wait(len(tt.expected)) - tt.plugin.Stop() + plugin.Stop() actual := acc.GetTelegrafMetrics() for _, m := range actual { @@ -523,7 +541,7 @@ func TestCharacterEncoding(t *testing.T) { } func TestTailEOF(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\n") @@ -531,7 +549,7 @@ func TestTailEOF(t *testing.T) { err = tmpfile.Sync() require.NoError(t, err) - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -565,3 +583,13 @@ func TestTailEOF(t *testing.T) { err = tmpfile.Close() require.NoError(t, err) } + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") +} diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 41b8e463766ba..aedaa7276b41e 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -14,7 +14,7 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -type TcpListener struct { +type TCPListener struct { ServiceAddress string AllowedPendingMessages int MaxTCPConnections int `toml:"max_tcp_connections"` @@ -65,26 +65,26 @@ const sampleConfig = ` # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener ` -func (t *TcpListener) SampleConfig() string { +func (t *TCPListener) SampleConfig() string { return sampleConfig } -func (t *TcpListener) Description() string { +func (t *TCPListener) Description() string { return "Generic TCP listener" } // All the work is done in the Start() function, so this is just a dummy // function. 
-func (t *TcpListener) Gather(_ telegraf.Accumulator) error { +func (t *TCPListener) Gather(_ telegraf.Accumulator) error { return nil } -func (t *TcpListener) SetParser(parser parsers.Parser) { +func (t *TCPListener) SetParser(parser parsers.Parser) { t.parser = parser } // Start starts the tcp listener service. -func (t *TcpListener) Start(acc telegraf.Accumulator) error { +func (t *TCPListener) Start(acc telegraf.Accumulator) error { t.Lock() defer t.Unlock() @@ -129,10 +129,12 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error { } // Stop cleans up all resources -func (t *TcpListener) Stop() { +func (t *TCPListener) Stop() { t.Lock() defer t.Unlock() close(t.done) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive t.listener.Close() // Close all open TCP connections @@ -146,6 +148,8 @@ func (t *TcpListener) Stop() { } t.cleanup.Unlock() for _, conn := range conns { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() } @@ -155,18 +159,19 @@ func (t *TcpListener) Stop() { } // tcpListen listens for incoming TCP connections. -func (t *TcpListener) tcpListen() error { +func (t *TCPListener) tcpListen() { defer t.wg.Done() for { select { case <-t.done: - return nil + return default: // Accept connection: conn, err := t.listener.AcceptTCP() if err != nil { - return err + t.Log.Errorf("accepting TCP connection failed: %v", err) + return } select { @@ -186,24 +191,28 @@ func (t *TcpListener) tcpListen() error { } // refuser refuses a TCP connection -func (t *TcpListener) refuser(conn *net.TCPConn) { +func (t *TCPListener) refuser(conn *net.TCPConn) { // Tell the connection why we are closing. + //nolint:errcheck,revive fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+ " reached, closing.\nYou may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", t.MaxTCPConnections) + //nolint:errcheck,revive conn.Close() t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") } // handler handles a single TCP Connection -func (t *TcpListener) handler(conn *net.TCPConn, id string) { +func (t *TCPListener) handler(conn *net.TCPConn, id string) { t.CurrentConnections.Incr(1) t.TotalConnections.Incr(1) // connection cleanup function defer func() { t.wg.Done() - conn.Close() + if err := conn.Close(); err != nil { + t.acc.AddError(err) + } // Add one connection potential back to channel when this one closes t.accept <- true t.forget(id) @@ -243,7 +252,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { } // tcpParser parses the incoming tcp byte packets -func (t *TcpListener) tcpParser() error { +func (t *TCPListener) tcpParser() { defer t.wg.Done() var packet []byte @@ -254,7 +263,7 @@ func (t *TcpListener) tcpParser() error { case <-t.done: // drain input packets before finishing: if len(t.in) == 0 { - return nil + return } case packet = <-t.in: if len(packet) == 0 { @@ -276,14 +285,14 @@ func (t *TcpListener) tcpParser() error { } // forget a TCP connection -func (t *TcpListener) forget(id string) { +func (t *TCPListener) forget(id string) { t.cleanup.Lock() defer t.cleanup.Unlock() delete(t.conns, id) } // remember a TCP connection -func (t *TcpListener) remember(id string, conn *net.TCPConn) { +func (t *TCPListener) remember(id string, conn *net.TCPConn) { t.cleanup.Lock() defer t.cleanup.Unlock() t.conns[id] = conn @@ 
-291,7 +300,7 @@ func (t *TcpListener) remember(id string, conn *net.TCPConn) { func init() { inputs.Add("tcp_listener", func() telegraf.Input { - return &TcpListener{ + return &TCPListener{ ServiceAddress: ":8094", AllowedPendingMessages: 10000, MaxTCPConnections: 250, diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index bb83f0465bd77..9203318aff73e 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -30,9 +30,9 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 ` ) -func newTestTcpListener() (*TcpListener, chan []byte) { +func newTestTCPListener() (*TCPListener, chan []byte) { in := make(chan []byte, 1500) - listener := &TcpListener{ + listener := &TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, @@ -45,7 +45,7 @@ func newTestTcpListener() (*TcpListener, chan []byte) { // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8198", AllowedPendingMessages: 100000, @@ -56,28 +56,26 @@ func BenchmarkTCP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) conn, err := net.Dial("tcp", "127.0.0.1:8198") - if err != nil { - panic(err) - } + require.NoError(b, err) for i := 0; i < 100000; i++ { - fmt.Fprintf(conn, testMsg) + _, err := fmt.Fprint(conn, testMsg) + require.NoError(b, err) } - conn.(*net.TCPConn).CloseWrite() + require.NoError(b, conn.(*net.TCPConn).CloseWrite()) // wait for all 100,000 metrics to be processed buf := []byte{0} - conn.Read(buf) // will EOF when completed + // will EOF when completed + _, err = conn.Read(buf) + require.NoError(b, err) listener.Stop() } } func TestHighTrafficTCP(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8199", AllowedPendingMessages: 100000, @@ -87,15 +85,15 @@ func TestHighTrafficTCP(t *testing.T) { acc := &testutil.Accumulator{} // send multiple messages to socket - err := listener.Start(acc) - require.NoError(t, err) + require.NoError(t, listener.Start(acc)) conn, err := net.Dial("tcp", "127.0.0.1:8199") require.NoError(t, err) for i := 0; i < 100000; i++ { - fmt.Fprintf(conn, testMsg) + _, err := fmt.Fprint(conn, testMsg) + require.NoError(t, err) } - conn.(*net.TCPConn).CloseWrite() + require.NoError(t, conn.(*net.TCPConn).CloseWrite()) buf := []byte{0} _, err = conn.Read(buf) assert.Equal(t, err, io.EOF) @@ -105,7 +103,7 @@ func TestHighTrafficTCP(t *testing.T) { } func TestConnectTCP(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, @@ -121,7 +119,8 @@ func TestConnectTCP(t *testing.T) { require.NoError(t, err) // send single message to socket - fmt.Fprintf(conn, testMsg) + _, err = fmt.Fprint(conn, testMsg) + require.NoError(t, err) acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, @@ -129,7 +128,8 @@ func TestConnectTCP(t *testing.T) { ) // send multiple messages to socket - fmt.Fprintf(conn, testMsgs) + _, err = fmt.Fprint(conn, testMsgs) + require.NoError(t, err) acc.Wait(6) hostTags := []string{"server02", "server03", 
"server04", "server05", "server06"} @@ -143,7 +143,7 @@ func TestConnectTCP(t *testing.T) { // Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, @@ -156,17 +156,18 @@ func TestConcurrentConns(t *testing.T) { defer listener.Stop() _, err := net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8195") + require.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8195") + require.NoError(t, err) buf := make([]byte, 1500) n, err := conn.Read(buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "Telegraf maximum concurrent TCP connections (2) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ @@ -179,7 +180,7 @@ func TestConcurrentConns(t *testing.T) { // Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8196", AllowedPendingMessages: 10000, @@ -192,15 +193,16 @@ func TestConcurrentConns1(t *testing.T) { defer listener.Stop() _, err := net.Dial("tcp", "127.0.0.1:8196") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8196") - assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8196") + require.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8196") + require.NoError(t, err) buf := make([]byte, 1500) n, err := conn.Read(buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ @@ -213,7 +215,7 @@ func TestConcurrentConns1(t *testing.T) { // Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, @@ -225,9 +227,9 @@ func TestCloseConcurrentConns(t *testing.T) { require.NoError(t, listener.Start(acc)) _, err := net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) listener.Stop() } @@ -235,7 +237,7 @@ func TestCloseConcurrentConns(t *testing.T) { func TestRunParser(t *testing.T) { var testmsg = []byte(testMsg) - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -245,7 +247,7 @@ func TestRunParser(t *testing.T) { go listener.tcpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", @@ -254,10 +256,10 @@ func TestRunParser(t *testing.T) { ) } -func TestRunParserInvalidMsg(t *testing.T) { +func TestRunParserInvalidMsg(_ *testing.T) { var testmsg = []byte("cpu_load_short") - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -274,7 +276,7 @@ func TestRunParserInvalidMsg(t 
*testing.T) { scnr := bufio.NewScanner(buf) for scnr.Scan() { - if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) { + if strings.Contains(scnr.Text(), "tcp_listener has received 1 malformed packets thus far.") { break } } @@ -283,7 +285,7 @@ func TestRunParserInvalidMsg(t *testing.T) { func TestRunParserGraphiteMsg(t *testing.T) { var testmsg = []byte("cpu.load.graphite 12 1454780029") - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -293,7 +295,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { go listener.tcpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_graphite", @@ -303,7 +305,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { func TestRunParserJSONMsg(t *testing.T) { var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -316,7 +318,7 @@ func TestRunParserJSONMsg(t *testing.T) { go listener.tcpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsFields(t, "udp_json_test", diff --git a/plugins/inputs/teamspeak/README.md b/plugins/inputs/teamspeak/README.md index 4767bb7e35171..ef3f0d8d9377b 100644 --- a/plugins/inputs/teamspeak/README.md +++ b/plugins/inputs/teamspeak/README.md @@ -31,6 +31,7 @@ the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/T - packets_received_total - bytes_sent_total - bytes_received_total + - query_clients_online ### Tags: @@ -41,5 +42,5 @@ the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/T ### Example output: ``` -teamspeak,virtual_server=1,name=LeopoldsServer,host=vm01 bytes_received_total=29638202639i,uptime=13567846i,total_ping=26.89,total_packet_loss=0,packets_sent_total=415821252i,packets_received_total=237069900i,bytes_sent_total=55309568252i,clients_online=11i 1507406561000000000 +teamspeak,virtual_server=1,name=LeopoldsServer,host=vm01 bytes_received_total=29638202639i,uptime=13567846i,total_ping=26.89,total_packet_loss=0,packets_sent_total=415821252i,packets_received_total=237069900i,bytes_sent_total=55309568252i,clients_online=11i,query_clients_online=1i 1507406561000000000 ``` \ No newline at end of file diff --git a/plugins/inputs/teamspeak/teamspeak.go b/plugins/inputs/teamspeak/teamspeak.go index 91fdf1135d742..e6861f03e25af 100644 --- a/plugins/inputs/teamspeak/teamspeak.go +++ b/plugins/inputs/teamspeak/teamspeak.go @@ -55,7 +55,10 @@ func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error { } for _, vserver := range ts.VirtualServers { - ts.client.Use(vserver) + if err := ts.client.Use(vserver); err != nil { + ts.connected = false + return err + } sm, err := ts.client.Server.Info() if err != nil { @@ -83,6 +86,7 @@ func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error { "packets_received_total": sc.PacketsReceivedTotal, "bytes_sent_total": sc.BytesSentTotal, "bytes_received_total": sc.BytesReceivedTotal, + "query_clients_online": sm.QueryClientsOnline, } acc.AddFields("teamspeak", fields, tags) diff --git a/plugins/inputs/teamspeak/teamspeak_test.go b/plugins/inputs/teamspeak/teamspeak_test.go index b66948f289f3d..98fc5194849c7 100644 --- a/plugins/inputs/teamspeak/teamspeak_test.go +++ b/plugins/inputs/teamspeak/teamspeak_test.go @@ -7,6 
+7,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) const welcome = `Welcome to the TeamSpeak 3 ServerQuery interface, type "help" for a list of commands and "help " for information on a specific command.` @@ -22,9 +23,7 @@ var cmd = map[string]string{ func TestGather(t *testing.T) { l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("Initializing test server failed") - } + require.NoError(t, err, "Initializing test server failed") defer l.Close() go handleRequest(l, t) @@ -36,11 +35,7 @@ func TestGather(t *testing.T) { Password: "test", VirtualServers: []int{1}, } - err = testConfig.Gather(&acc) - - if err != nil { - t.Fatalf("Gather returned error. Error: %s\n", err) - } + require.NoError(t, testConfig.Gather(&acc), "Gather returned error") fields := map[string]interface{}{ "uptime": int(148), @@ -51,6 +46,7 @@ func TestGather(t *testing.T) { "packets_received_total": uint64(370), "bytes_sent_total": uint64(28058), "bytes_received_total": uint64(17468), + "query_clients_online": int(1), } acc.AssertContainsFields(t, "teamspeak", fields) @@ -58,10 +54,9 @@ func TestGather(t *testing.T) { func handleRequest(l net.Listener, t *testing.T) { c, err := l.Accept() - if err != nil { - t.Fatal("Error accepting test connection") - } - c.Write([]byte("TS3\n\r" + welcome + "\n\r")) + require.NoError(t, err, "Error accepting test connection") + _, err = c.Write([]byte("TS3\n\r" + welcome + "\n\r")) + require.NoError(t, err) for { msg, _, err := bufio.NewReader(c).ReadLine() if err != nil { @@ -72,16 +67,21 @@ func handleRequest(l net.Listener, t *testing.T) { if exists { switch r { case "": - c.Write([]byte(ok + "\n\r")) + _, err = c.Write([]byte(ok + "\n\r")) + require.NoError(t, err) case "quit": - c.Write([]byte(ok + "\n\r")) - c.Close() + _, err = c.Write([]byte(ok + "\n\r")) + require.NoError(t, err) + err = c.Close() + require.NoError(t, err) return default: - c.Write([]byte(r + "\n\r" + ok + "\n\r")) + _, err = c.Write([]byte(r + "\n\r" + ok + "\n\r")) + require.NoError(t, err) } } else { - c.Write([]byte(errorMsg + "\n\r")) + _, err = c.Write([]byte(errorMsg + "\n\r")) + require.NoError(t, err) } } } diff --git a/plugins/inputs/temp/temp_test.go b/plugins/inputs/temp/temp_test.go index 080ff66ac1848..9ced8ac14a2ef 100644 --- a/plugins/inputs/temp/temp_test.go +++ b/plugins/inputs/temp/temp_test.go @@ -34,5 +34,4 @@ func TestTemperature(t *testing.T) { "sensor": "coretemp_sensor1_crit", } acc.AssertContainsTaggedFields(t, "temp", expectedFields, expectedTags) - } diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index c45ae81d10b60..1787354c22cff 100644 --- a/plugins/inputs/tengine/tengine.go +++ b/plugins/inputs/tengine/tengine.go @@ -14,14 +14,14 @@ import ( "io" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type Tengine struct { Urls []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *http.Client @@ -56,7 +56,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error { // Create an HTTP client that is re-used for each // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -73,7 +73,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error
{ wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -81,61 +81,61 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Tengine) createHttpClient() (*http.Client, error) { +func (n *Tengine) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } type TengineStatus struct { - host string - bytes_in uint64 - bytes_out uint64 - conn_total uint64 - req_total uint64 - http_2xx uint64 - http_3xx uint64 - http_4xx uint64 - http_5xx uint64 - http_other_status uint64 - rt uint64 - ups_req uint64 - ups_rt uint64 - ups_tries uint64 - http_200 uint64 - http_206 uint64 - http_302 uint64 - http_304 uint64 - http_403 uint64 - http_404 uint64 - http_416 uint64 - http_499 uint64 - http_500 uint64 - http_502 uint64 - http_503 uint64 - http_504 uint64 - http_508 uint64 - http_other_detail_status uint64 - http_ups_4xx uint64 - http_ups_5xx uint64 + host string + bytesIn uint64 + bytesOut uint64 + connTotal uint64 + reqTotal uint64 + http2xx uint64 + http3xx uint64 + http4xx uint64 + http5xx uint64 + httpOtherStatus uint64 + rt uint64 + upsReq uint64 + upsRt uint64 + upsTries uint64 + http200 uint64 + http206 uint64 + http302 uint64 + http304 uint64 + http403 uint64 + http404 uint64 + http416 uint64 + http499 uint64 + http500 uint64 + http502 uint64 + http503 uint64 + http504 uint64 + http508 uint64 + httpOtherDetailStatus uint64 + httpUps4xx uint64 + httpUps5xx uint64 } -func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - var tenginestatus TengineStatus +func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + var tengineStatus TengineStatus resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) @@ -152,170 +152,171 @@ func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { if err != nil || io.EOF == err { break } - line_split := strings.Split(strings.TrimSpace(line), ",") - if len(line_split) != 30 { + lineSplit := strings.Split(strings.TrimSpace(line), ",") + if len(lineSplit) != 30 { continue } - tenginestatus.host = line_split[0] + tengineStatus.host = lineSplit[0] if err != nil { return err } - tenginestatus.bytes_in, err = strconv.ParseUint(line_split[1], 10, 64) + tengineStatus.bytesIn, err = strconv.ParseUint(lineSplit[1], 10, 64) if err != nil { return err } - tenginestatus.bytes_out, err = strconv.ParseUint(line_split[2], 10, 64) + tengineStatus.bytesOut, err = strconv.ParseUint(lineSplit[2], 10, 64) if err != nil { return err } - tenginestatus.conn_total, err = strconv.ParseUint(line_split[3], 10, 64) + tengineStatus.connTotal, err = strconv.ParseUint(lineSplit[3], 10, 64) if err != nil { return err } - tenginestatus.req_total, err = strconv.ParseUint(line_split[4], 10, 64) + tengineStatus.reqTotal, err = strconv.ParseUint(lineSplit[4], 10, 64) if err != nil { return err } - tenginestatus.http_2xx, err = strconv.ParseUint(line_split[5], 10, 64) + tengineStatus.http2xx, err = 
strconv.ParseUint(lineSplit[5], 10, 64) if err != nil { return err } - tenginestatus.http_3xx, err = strconv.ParseUint(line_split[6], 10, 64) + tengineStatus.http3xx, err = strconv.ParseUint(lineSplit[6], 10, 64) if err != nil { return err } - tenginestatus.http_4xx, err = strconv.ParseUint(line_split[7], 10, 64) + tengineStatus.http4xx, err = strconv.ParseUint(lineSplit[7], 10, 64) if err != nil { return err } - tenginestatus.http_5xx, err = strconv.ParseUint(line_split[8], 10, 64) + tengineStatus.http5xx, err = strconv.ParseUint(lineSplit[8], 10, 64) if err != nil { return err } - tenginestatus.http_other_status, err = strconv.ParseUint(line_split[9], 10, 64) + tengineStatus.httpOtherStatus, err = strconv.ParseUint(lineSplit[9], 10, 64) if err != nil { return err } - tenginestatus.rt, err = strconv.ParseUint(line_split[10], 10, 64) + tengineStatus.rt, err = strconv.ParseUint(lineSplit[10], 10, 64) if err != nil { return err } - tenginestatus.ups_req, err = strconv.ParseUint(line_split[11], 10, 64) + tengineStatus.upsReq, err = strconv.ParseUint(lineSplit[11], 10, 64) if err != nil { return err } - tenginestatus.ups_rt, err = strconv.ParseUint(line_split[12], 10, 64) + tengineStatus.upsRt, err = strconv.ParseUint(lineSplit[12], 10, 64) if err != nil { return err } - tenginestatus.ups_tries, err = strconv.ParseUint(line_split[13], 10, 64) + tengineStatus.upsTries, err = strconv.ParseUint(lineSplit[13], 10, 64) if err != nil { return err } - tenginestatus.http_200, err = strconv.ParseUint(line_split[14], 10, 64) + tengineStatus.http200, err = strconv.ParseUint(lineSplit[14], 10, 64) if err != nil { return err } - tenginestatus.http_206, err = strconv.ParseUint(line_split[15], 10, 64) + tengineStatus.http206, err = strconv.ParseUint(lineSplit[15], 10, 64) if err != nil { return err } - tenginestatus.http_302, err = strconv.ParseUint(line_split[16], 10, 64) + tengineStatus.http302, err = strconv.ParseUint(lineSplit[16], 10, 64) if err != nil { return err } - tenginestatus.http_304, err = strconv.ParseUint(line_split[17], 10, 64) + tengineStatus.http304, err = strconv.ParseUint(lineSplit[17], 10, 64) if err != nil { return err } - tenginestatus.http_403, err = strconv.ParseUint(line_split[18], 10, 64) + tengineStatus.http403, err = strconv.ParseUint(lineSplit[18], 10, 64) if err != nil { return err } - tenginestatus.http_404, err = strconv.ParseUint(line_split[19], 10, 64) + tengineStatus.http404, err = strconv.ParseUint(lineSplit[19], 10, 64) if err != nil { return err } - tenginestatus.http_416, err = strconv.ParseUint(line_split[20], 10, 64) + tengineStatus.http416, err = strconv.ParseUint(lineSplit[20], 10, 64) if err != nil { return err } - tenginestatus.http_499, err = strconv.ParseUint(line_split[21], 10, 64) + tengineStatus.http499, err = strconv.ParseUint(lineSplit[21], 10, 64) if err != nil { return err } - tenginestatus.http_500, err = strconv.ParseUint(line_split[22], 10, 64) + tengineStatus.http500, err = strconv.ParseUint(lineSplit[22], 10, 64) if err != nil { return err } - tenginestatus.http_502, err = strconv.ParseUint(line_split[23], 10, 64) + tengineStatus.http502, err = strconv.ParseUint(lineSplit[23], 10, 64) if err != nil { return err } - tenginestatus.http_503, err = strconv.ParseUint(line_split[24], 10, 64) + tengineStatus.http503, err = strconv.ParseUint(lineSplit[24], 10, 64) if err != nil { return err } - tenginestatus.http_504, err = strconv.ParseUint(line_split[25], 10, 64) + tengineStatus.http504, err = strconv.ParseUint(lineSplit[25], 10, 64) if err != nil { 
return err } - tenginestatus.http_508, err = strconv.ParseUint(line_split[26], 10, 64) + tengineStatus.http508, err = strconv.ParseUint(lineSplit[26], 10, 64) if err != nil { return err } - tenginestatus.http_other_detail_status, err = strconv.ParseUint(line_split[27], 10, 64) + tengineStatus.httpOtherDetailStatus, err = strconv.ParseUint(lineSplit[27], 10, 64) if err != nil { return err } - tenginestatus.http_ups_4xx, err = strconv.ParseUint(line_split[28], 10, 64) + tengineStatus.httpUps4xx, err = strconv.ParseUint(lineSplit[28], 10, 64) if err != nil { return err } - tenginestatus.http_ups_5xx, err = strconv.ParseUint(line_split[29], 10, 64) + tengineStatus.httpUps5xx, err = strconv.ParseUint(lineSplit[29], 10, 64) if err != nil { return err } - tags := getTags(addr, tenginestatus.host) + tags := getTags(addr, tengineStatus.host) fields := map[string]interface{}{ - "bytes_in": tenginestatus.bytes_in, - "bytes_out": tenginestatus.bytes_out, - "conn_total": tenginestatus.conn_total, - "req_total": tenginestatus.req_total, - "http_2xx": tenginestatus.http_2xx, - "http_3xx": tenginestatus.http_3xx, - "http_4xx": tenginestatus.http_4xx, - "http_5xx": tenginestatus.http_5xx, - "http_other_status": tenginestatus.http_other_status, - "rt": tenginestatus.rt, - "ups_req": tenginestatus.ups_req, - "ups_rt": tenginestatus.ups_rt, - "ups_tries": tenginestatus.ups_tries, - "http_200": tenginestatus.http_200, - "http_206": tenginestatus.http_206, - "http_302": tenginestatus.http_302, - "http_304": tenginestatus.http_304, - "http_403": tenginestatus.http_403, - "http_404": tenginestatus.http_404, - "http_416": tenginestatus.http_416, - "http_499": tenginestatus.http_499, - "http_500": tenginestatus.http_500, - "http_502": tenginestatus.http_502, - "http_503": tenginestatus.http_503, - "http_504": tenginestatus.http_504, - "http_508": tenginestatus.http_508, - "http_other_detail_status": tenginestatus.http_other_detail_status, - "http_ups_4xx": tenginestatus.http_ups_4xx, - "http_ups_5xx": tenginestatus.http_ups_5xx, + "bytes_in": tengineStatus.bytesIn, + "bytes_out": tengineStatus.bytesOut, + "conn_total": tengineStatus.connTotal, + "req_total": tengineStatus.reqTotal, + "http_2xx": tengineStatus.http2xx, + "http_3xx": tengineStatus.http3xx, + "http_4xx": tengineStatus.http4xx, + "http_5xx": tengineStatus.http5xx, + "http_other_status": tengineStatus.httpOtherStatus, + "rt": tengineStatus.rt, + "ups_req": tengineStatus.upsReq, + "ups_rt": tengineStatus.upsRt, + "ups_tries": tengineStatus.upsTries, + "http_200": tengineStatus.http200, + "http_206": tengineStatus.http206, + "http_302": tengineStatus.http302, + "http_304": tengineStatus.http304, + "http_403": tengineStatus.http403, + "http_404": tengineStatus.http404, + "http_416": tengineStatus.http416, + "http_499": tengineStatus.http499, + "http_500": tengineStatus.http500, + "http_502": tengineStatus.http502, + "http_503": tengineStatus.http503, + "http_504": tengineStatus.http504, + "http_508": tengineStatus.http508, + "http_other_detail_status": tengineStatus.httpOtherDetailStatus, + "http_ups_4xx": tengineStatus.httpUps4xx, + "http_ups_5xx": tengineStatus.httpUps5xx, } acc.AddFields("tengine", fields, tags) } - return nil + // Return the potential error of the loop-read + return err } // Get tag(s) for the tengine plugin -func getTags(addr *url.URL, server_name string) map[string]string { +func getTags(addr *url.URL, serverName string) map[string]string { h := addr.Host host, port, err := net.SplitHostPort(h) if err != nil { @@ -328,7 +329,7 @@ 
func getTags(addr *url.URL, server_name string) map[string]string { port = "" } } - return map[string]string{"server": host, "port": port, "server_name": server_name} + return map[string]string{"server": host, "port": port, "server_name": serverName} } func init() { diff --git a/plugins/inputs/tengine/tengine_test.go b/plugins/inputs/tengine/tengine_test.go index 317820bb22acb..d91c97465aff1 100644 --- a/plugins/inputs/tengine/tengine_test.go +++ b/plugins/inputs/tengine/tengine_test.go @@ -28,9 +28,8 @@ func TestTengineTags(t *testing.T) { func TestTengineGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - rsp = tengineSampleResponse - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, tengineSampleResponse) + require.NoError(t, err) })) defer ts.Close() @@ -38,13 +37,13 @@ func TestTengineGeneratesMetrics(t *testing.T) { Urls: []string{fmt.Sprintf("%s/us", ts.URL)}, } - var acc_tengine testutil.Accumulator + var accTengine testutil.Accumulator - err_tengine := acc_tengine.GatherError(n.Gather) + errTengine := accTengine.GatherError(n.Gather) - require.NoError(t, err_tengine) + require.NoError(t, errTengine) - fields_tengine := map[string]interface{}{ + fieldsTengine := map[string]interface{}{ "bytes_in": uint64(784), "bytes_out": uint64(1511), "conn_total": uint64(2), @@ -93,5 +92,5 @@ func TestTengineGeneratesMetrics(t *testing.T) { } } tags := map[string]string{"server": host, "port": port, "server_name": "127.0.0.1"} - acc_tengine.AssertContainsTaggedFields(t, "tengine", fields_tengine, tags) + accTengine.AssertContainsTaggedFields(t, "tengine", fieldsTengine, tags) } diff --git a/plugins/inputs/tomcat/tomcat.go b/plugins/inputs/tomcat/tomcat.go index d32b0168a3d05..5b869fb4d8c76 100644 --- a/plugins/inputs/tomcat/tomcat.go +++ b/plugins/inputs/tomcat/tomcat.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -63,7 +63,7 @@ type Tomcat struct { URL string Username string Password string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -99,7 +99,7 @@ func (s *Tomcat) SampleConfig() string { func (s *Tomcat) Gather(acc telegraf.Accumulator) error { if s.client == nil { - client, err := s.createHttpClient() + client, err := s.createHTTPClient() if err != nil { return err } @@ -131,7 +131,9 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error { } var status TomcatStatus - xml.NewDecoder(resp.Body).Decode(&status) + if err := xml.NewDecoder(resp.Body).Decode(&status); err != nil { + return err + } // add tomcat_jvm_memory measurements tcm := map[string]interface{}{ @@ -187,7 +189,7 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error { return nil } -func (s *Tomcat) createHttpClient() (*http.Client, error) { +func (s *Tomcat) createHTTPClient() (*http.Client, error) { tlsConfig, err := s.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -197,7 +199,7 @@ func (s *Tomcat) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: s.Timeout.Duration, + Timeout: time.Duration(s.Timeout), } return client, nil @@ -209,7 +211,7 @@ func init() { URL: "http://127.0.0.1:8080/manager/status/all?XML=true", Username: "tomcat", Password: "s3cret", - Timeout: internal.Duration{Duration: 5 * 
time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/tomcat/tomcat_test.go b/plugins/inputs/tomcat/tomcat_test.go index 5e206ab835583..e22cb9c88c874 100644 --- a/plugins/inputs/tomcat/tomcat_test.go +++ b/plugins/inputs/tomcat/tomcat_test.go @@ -40,7 +40,8 @@ var tomcatStatus8 = ` func TestHTTPTomcat8(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tomcatStatus8) + _, err := fmt.Fprintln(w, tomcatStatus8) + require.NoError(t, err) })) defer ts.Close() @@ -51,8 +52,7 @@ func TestHTTPTomcat8(t *testing.T) { } var acc testutil.Accumulator - err := tc.Gather(&acc) - require.NoError(t, err) + require.NoError(t, tc.Gather(&acc)) // tomcat_jvm_memory jvmMemoryFields := map[string]interface{}{ @@ -112,7 +112,8 @@ var tomcatStatus6 = ` func TestHTTPTomcat6(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tomcatStatus6) + _, err := fmt.Fprintln(w, tomcatStatus6) + require.NoError(t, err) })) defer ts.Close() @@ -123,8 +124,7 @@ func TestHTTPTomcat6(t *testing.T) { } var acc testutil.Accumulator - err := tc.Gather(&acc) - require.NoError(t, err) + require.NoError(t, tc.Gather(&acc)) // tomcat_jvm_memory jvmMemoryFields := map[string]interface{}{ diff --git a/plugins/inputs/trig/README.md b/plugins/inputs/trig/README.md new file mode 100644 index 0000000000000..41ff8743e8cf3 --- /dev/null +++ b/plugins/inputs/trig/README.md @@ -0,0 +1,28 @@ +# Trig Input Plugin + +The `trig` plugin is for demonstration purposes and inserts sine and cosine waves + +### Configuration + +```toml +# Inserts sine and cosine waves for demonstration purposes +[[inputs.trig]] + ## Set the amplitude + amplitude = 10.0 +``` + +### Metrics + +- trig + - fields: + - cosine (float) + - sine (float) + + +### Example Output + +``` +trig,host=MBP15-SWANG.local cosine=10,sine=0 1632338680000000000 +trig,host=MBP15-SWANG.local sine=5.877852522924732,cosine=8.090169943749473 1632338690000000000 +trig,host=MBP15-SWANG.local sine=9.510565162951535,cosine=3.0901699437494745 1632338700000000000 +``` diff --git a/plugins/inputs/trig/trig_test.go b/plugins/inputs/trig/trig_test.go index 1471edbeaec2d..de4fa07886f05 100644 --- a/plugins/inputs/trig/trig_test.go +++ b/plugins/inputs/trig/trig_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestTrig(t *testing.T) { @@ -13,13 +14,12 @@ func TestTrig(t *testing.T) { } for i := 0.0; i < 10.0; i++ { - var acc testutil.Accumulator sine := math.Sin((i*math.Pi)/5.0) * s.Amplitude cosine := math.Cos((i*math.Pi)/5.0) * s.Amplitude - s.Gather(&acc) + require.NoError(t, s.Gather(&acc)) fields := make(map[string]interface{}) fields["sine"] = sine diff --git a/plugins/inputs/twemproxy/README.md b/plugins/inputs/twemproxy/README.md new file mode 100644 index 0000000000000..0c07e0aec4463 --- /dev/null +++ b/plugins/inputs/twemproxy/README.md @@ -0,0 +1,16 @@ +# Twemproxy Input Plugin + +The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/twitter/twemproxy) servers.
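For context on the README added above: Twemproxy writes its counters as a single JSON document on a plain TCP stats port and then closes the connection, which is why the plugin code further below can simply dial, read to EOF, and unmarshal. A minimal client sketch under those assumptions; the address matches the README example, but the `service`/`version` keys are illustrative and this is not the plugin itself:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net"
	"time"
)

func main() {
	// Hypothetical stats endpoint; 22222 is the port shown in the README example.
	conn, err := net.DialTimeout("tcp", "localhost:22222", 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The server writes one JSON document and closes the socket,
	// so reading to EOF yields the complete stats payload.
	body, err := io.ReadAll(conn)
	if err != nil {
		panic(err)
	}

	var stats map[string]interface{}
	if err := json.Unmarshal(body, &stats); err != nil {
		panic(err)
	}
	fmt.Println(stats["service"], stats["version"])
}
```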
+ + +### Configuration + +```toml +# Read Twemproxy stats data +[[inputs.twemproxy]] + ## Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + ## Monitor pool name + pools = ["redis_pool", "mc_pool"] +``` + diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index cda56943f1002..b4c4b52f85b6c 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -3,7 +3,7 @@ package twemproxy import ( "encoding/json" "errors" - "io/ioutil" + "io" "net" "time" @@ -37,7 +37,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - body, err := ioutil.ReadAll(conn) + body, err := io.ReadAll(conn) if err != nil { return err } diff --git a/plugins/inputs/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go index dd79048e0a5f5..0da1694d557d8 100644 --- a/plugins/inputs/twemproxy/twemproxy_test.go +++ b/plugins/inputs/twemproxy/twemproxy_test.go @@ -67,8 +67,12 @@ func mockTwemproxyServer() (net.Listener, error) { go func(l net.Listener) { for { conn, _ := l.Accept() - conn.Write([]byte(sampleStats)) - conn.Close() + if _, err := conn.Write([]byte(sampleStats)); err != nil { + return + } + if err := conn.Close(); err != nil { + return + } break } }(listener) diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 7fa59fdb121bc..07cd79cb2a610 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -13,8 +13,8 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -// UdpListener main struct for the collector -type UdpListener struct { +// UDPListener main struct for the collector +type UDPListener struct { ServiceAddress string // UDPBufferSize should only be set if you want/need the telegraf UDP socket to @@ -57,9 +57,9 @@ type UdpListener struct { Log telegraf.Logger } -// UDP_MAX_PACKET_SIZE is packet limit, see +// UDPMaxPacketSize is packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure -const UDP_MAX_PACKET_SIZE int = 64 * 1024 +const UDPMaxPacketSize int = 64 * 1024 var dropwarn = "udp_listener message queue full. " + "We have dropped %d messages so far. " + @@ -74,25 +74,25 @@ const sampleConfig = ` # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener ` -func (u *UdpListener) SampleConfig() string { +func (u *UDPListener) SampleConfig() string { return sampleConfig } -func (u *UdpListener) Description() string { +func (u *UDPListener) Description() string { return "Generic UDP listener" } // All the work is done in the Start() function, so this is just a dummy // function. 
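The dummy-Gather comment above captures the service-input pattern these listener plugins share: Gather is a no-op because Start launches the worker goroutines and Stop tears them down through a done channel and a WaitGroup. A compressed, hypothetical sketch of that lifecycle (not the actual plugin, which also manages the socket and a parser):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// listener is a stripped-down stand-in for the UDPListener-style plugins:
// all work happens in goroutines launched by Start, and Stop tears them down.
type listener struct {
	in   chan []byte
	done chan struct{}
	wg   sync.WaitGroup
}

func (l *listener) Start() {
	l.in = make(chan []byte, 100)
	l.done = make(chan struct{})
	l.wg.Add(1)
	go l.loop()
}

func (l *listener) loop() {
	defer l.wg.Done()
	for {
		select {
		case <-l.done:
			return
		case pkt := <-l.in:
			fmt.Printf("parsed %d bytes\n", len(pkt))
		}
	}
}

func (l *listener) Stop() {
	close(l.done) // signal the goroutines, then wait for them to exit
	l.wg.Wait()
	close(l.in)
}

func main() {
	l := &listener{}
	l.Start()
	l.in <- []byte("cpu value=1")
	time.Sleep(10 * time.Millisecond)
	l.Stop()
}
```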
-func (u *UdpListener) Gather(_ telegraf.Accumulator) error { +func (u *UDPListener) Gather(_ telegraf.Accumulator) error { return nil } -func (u *UdpListener) SetParser(parser parsers.Parser) { +func (u *UDPListener) SetParser(parser parsers.Parser) { u.parser = parser } -func (u *UdpListener) Start(acc telegraf.Accumulator) error { +func (u *UDPListener) Start(acc telegraf.Accumulator) error { u.Lock() defer u.Unlock() @@ -110,7 +110,9 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { u.in = make(chan []byte, u.AllowedPendingMessages) u.done = make(chan struct{}) - u.udpListen() + if err := u.udpListen(); err != nil { + return err + } u.wg.Add(1) go u.udpParser() @@ -119,17 +121,19 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { return nil } -func (u *UdpListener) Stop() { +func (u *UDPListener) Stop() { u.Lock() defer u.Unlock() close(u.done) u.wg.Wait() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive u.listener.Close() close(u.in) u.Log.Infof("Stopped service on %q", u.ServiceAddress) } -func (u *UdpListener) udpListen() error { +func (u *UDPListener) udpListen() error { var err error address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress) @@ -153,16 +157,18 @@ func (u *UdpListener) udpListen() error { return nil } -func (u *UdpListener) udpListenLoop() { +func (u *UDPListener) udpListenLoop() { defer u.wg.Done() - buf := make([]byte, UDP_MAX_PACKET_SIZE) + buf := make([]byte, UDPMaxPacketSize) for { select { case <-u.done: return default: - u.listener.SetReadDeadline(time.Now().Add(time.Second)) + if err := u.listener.SetReadDeadline(time.Now().Add(time.Second)); err != nil { + u.Log.Error("setting read-deadline failed: " + err.Error()) + } n, _, err := u.listener.ReadFromUDP(buf) if err != nil { @@ -189,7 +195,7 @@ func (u *UdpListener) udpListenLoop() { } } -func (u *UdpListener) udpParser() error { +func (u *UDPListener) udpParser() { defer u.wg.Done() var packet []byte @@ -199,7 +205,7 @@ func (u *UdpListener) udpParser() error { select { case <-u.done: if len(u.in) == 0 { - return nil + return } case packet = <-u.in: metrics, err = u.parser.Parse(packet) @@ -219,7 +225,7 @@ func (u *UdpListener) udpParser() error { func init() { inputs.Add("udp_listener", func() telegraf.Input { - return &UdpListener{ + return &UDPListener{ ServiceAddress: ":8092", AllowedPendingMessages: 10000, } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index b241235e4d61d..3e36838c6192a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -1,211 +1,201 @@ package udp_listener -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "strings" - "testing" - - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" -) - -const ( - testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" - - testMsgs = ` -cpu_load_short,host=server02 value=12.0 1422568543702900257 -cpu_load_short,host=server03 value=12.0 1422568543702900257 -cpu_load_short,host=server04 value=12.0 1422568543702900257 -cpu_load_short,host=server05 value=12.0 1422568543702900257 -cpu_load_short,host=server06 value=12.0 1422568543702900257 -` -) - -func newTestUdpListener() (*UdpListener, chan []byte) { - in := make(chan []byte, 1500) - listener := &UdpListener{ - Log: testutil.Logger{}, - ServiceAddress: ":8125", - 
AllowedPendingMessages: 10000, - in: in, - done: make(chan struct{}), - } - return listener, in -} - -// func TestHighTrafficUDP(t *testing.T) { -// listener := UdpListener{ -// ServiceAddress: ":8126", -// AllowedPendingMessages: 100000, +// This plugin will become officially deprecated in 2.0 +// These tests have been randomly failing the nightly tests, can't remove plugin until breaking changes are allowed to be merged +// See this issue for more information: https://github.com/influxdata/telegraf/issues/9478 + +// const ( +// testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + +// testMsgs = ` +// cpu_load_short,host=server02 value=12.0 1422568543702900257 +// cpu_load_short,host=server03 value=12.0 1422568543702900257 +// cpu_load_short,host=server04 value=12.0 1422568543702900257 +// cpu_load_short,host=server05 value=12.0 1422568543702900257 +// cpu_load_short,host=server06 value=12.0 1422568543702900257 +// ` +// ) + +// func newTestUDPListener() (*UDPListener, chan []byte) { +// in := make(chan []byte, 1500) +// listener := &UDPListener{ +// Log: testutil.Logger{}, +// ServiceAddress: ":8125", +// AllowedPendingMessages: 10000, +// in: in, +// done: make(chan struct{}), // } -// var err error -// listener.parser, err = parsers.NewInfluxParser() -// require.NoError(t, err) +// return listener, in +// } + +// // func TestHighTrafficUDP(t *testing.T) { +// // listener := UDPListener{ +// // ServiceAddress: ":8126", +// // AllowedPendingMessages: 100000, +// // } +// // var err error +// // listener.parser, err = parsers.NewInfluxParser() +// // require.NoError(t, err) +// // acc := &testutil.Accumulator{} + +// // // send multiple messages to socket +// // err = listener.Start(acc) +// // require.NoError(t, err) + +// // conn, err := net.Dial("udp", "127.0.0.1:8126") +// // require.NoError(t, err) +// // mlen := int64(len(testMsgs)) +// // var sent int64 +// // for i := 0; i < 20000; i++ { +// // for sent > listener.BytesRecv.Get()+32000 { +// // // more than 32kb sitting in OS buffer, let it drain +// // runtime.Gosched() +// // } +// // conn.Write([]byte(testMsgs)) +// // sent += mlen +// // } +// // for sent > listener.BytesRecv.Get() { +// // runtime.Gosched() +// // } +// // for len(listener.in) > 0 { +// // runtime.Gosched() +// // } +// // listener.Stop() + +// // assert.Equal(t, uint64(100000), acc.NMetrics()) +// // } + +// func TestConnectUDP(t *testing.T) { +// listener := UDPListener{ +// Log: testutil.Logger{}, +// ServiceAddress: ":8127", +// AllowedPendingMessages: 10000, +// } +// listener.parser, _ = parsers.NewInfluxParser() + // acc := &testutil.Accumulator{} +// require.NoError(t, listener.Start(acc)) +// defer listener.Stop() -// // send multiple messages to socket -// err = listener.Start(acc) +// conn, err := net.Dial("udp", "127.0.0.1:8127") // require.NoError(t, err) -// conn, err := net.Dial("udp", "127.0.0.1:8126") +// // send single message to socket +// _, err = fmt.Fprint(conn, testMsg) // require.NoError(t, err) -// mlen := int64(len(testMsgs)) -// var sent int64 -// for i := 0; i < 20000; i++ { -// for sent > listener.BytesRecv.Get()+32000 { -// // more than 32kb sitting in OS buffer, let it drain -// runtime.Gosched() -// } -// conn.Write([]byte(testMsgs)) -// sent += mlen -// } -// for sent > listener.BytesRecv.Get() { -// runtime.Gosched() +// acc.Wait(1) +// acc.AssertContainsTaggedFields(t, "cpu_load_short", +// map[string]interface{}{"value": float64(12)}, +// map[string]string{"host": "server01"}, +// ) + +// // send 
multiple messages to socket +// _, err = fmt.Fprint(conn, testMsgs) +// require.NoError(t, err) +// acc.Wait(6) +// hostTags := []string{"server02", "server03", +// "server04", "server05", "server06"} +// for _, hostTag := range hostTags { +// acc.AssertContainsTaggedFields(t, "cpu_load_short", +// map[string]interface{}{"value": float64(12)}, +// map[string]string{"host": hostTag}, +// ) // } -// for len(listener.in) > 0 { -// runtime.Gosched() +// } + +// func TestRunParser(t *testing.T) { +// log.SetOutput(io.Discard) +// var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") + +// listener, in := newTestUDPListener() +// acc := testutil.Accumulator{} +// listener.acc = &acc +// defer close(listener.done) + +// listener.parser, _ = parsers.NewInfluxParser() +// listener.wg.Add(1) +// go listener.udpParser() + +// in <- testmsg +// require.NoError(t, listener.Gather(&acc)) + +// acc.Wait(1) +// acc.AssertContainsTaggedFields(t, "cpu_load_short", +// map[string]interface{}{"value": float64(12)}, +// map[string]string{"host": "server01"}, +// ) +// } + +// func TestRunParserInvalidMsg(_ *testing.T) { +// log.SetOutput(io.Discard) +// var testmsg = []byte("cpu_load_short") + +// listener, in := newTestUDPListener() +// acc := testutil.Accumulator{} +// listener.acc = &acc +// defer close(listener.done) + +// listener.parser, _ = parsers.NewInfluxParser() +// listener.wg.Add(1) +// go listener.udpParser() + +// buf := bytes.NewBuffer(nil) +// log.SetOutput(buf) +// defer log.SetOutput(os.Stderr) +// in <- testmsg + +// scnr := bufio.NewScanner(buf) +// for scnr.Scan() { +// if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) { +// break +// } // } -// listener.Stop() +// } + +// func TestRunParserGraphiteMsg(t *testing.T) { +// log.SetOutput(io.Discard) +// var testmsg = []byte("cpu.load.graphite 12 1454780029") + +// listener, in := newTestUDPListener() +// acc := testutil.Accumulator{} +// listener.acc = &acc +// defer close(listener.done) -// assert.Equal(t, uint64(100000), acc.NMetrics()) +// listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) +// listener.wg.Add(1) +// go listener.udpParser() + +// in <- testmsg +// require.NoError(t, listener.Gather(&acc)) + +// acc.Wait(1) +// acc.AssertContainsFields(t, "cpu_load_graphite", +// map[string]interface{}{"value": float64(12)}) // } -func TestConnectUDP(t *testing.T) { - listener := UdpListener{ - Log: testutil.Logger{}, - ServiceAddress: ":8127", - AllowedPendingMessages: 10000, - } - listener.parser, _ = parsers.NewInfluxParser() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - conn, err := net.Dial("udp", "127.0.0.1:8127") - require.NoError(t, err) - - // send single message to socket - fmt.Fprintf(conn, testMsg) - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01"}, - ) - - // send multiple messages to socket - fmt.Fprintf(conn, testMsgs) - acc.Wait(6) - hostTags := []string{"server02", "server03", - "server04", "server05", "server06"} - for _, hostTag := range hostTags { - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag}, - ) - } -} - -func TestRunParser(t *testing.T) { - log.SetOutput(ioutil.Discard) - var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") - - listener, in := newTestUdpListener() - acc 
:= testutil.Accumulator{} - listener.acc = &acc - defer close(listener.done) - - listener.parser, _ = parsers.NewInfluxParser() - listener.wg.Add(1) - go listener.udpParser() - - in <- testmsg - listener.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01"}, - ) -} - -func TestRunParserInvalidMsg(t *testing.T) { - log.SetOutput(ioutil.Discard) - var testmsg = []byte("cpu_load_short") - - listener, in := newTestUdpListener() - acc := testutil.Accumulator{} - listener.acc = &acc - defer close(listener.done) - - listener.parser, _ = parsers.NewInfluxParser() - listener.wg.Add(1) - go listener.udpParser() - - buf := bytes.NewBuffer(nil) - log.SetOutput(buf) - defer log.SetOutput(os.Stderr) - in <- testmsg - - scnr := bufio.NewScanner(buf) - for scnr.Scan() { - if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) { - break - } - } -} - -func TestRunParserGraphiteMsg(t *testing.T) { - log.SetOutput(ioutil.Discard) - var testmsg = []byte("cpu.load.graphite 12 1454780029") - - listener, in := newTestUdpListener() - acc := testutil.Accumulator{} - listener.acc = &acc - defer close(listener.done) - - listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - listener.wg.Add(1) - go listener.udpParser() - - in <- testmsg - listener.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "cpu_load_graphite", - map[string]interface{}{"value": float64(12)}) -} - -func TestRunParserJSONMsg(t *testing.T) { - log.SetOutput(ioutil.Discard) - var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") - - listener, in := newTestUdpListener() - acc := testutil.Accumulator{} - listener.acc = &acc - defer close(listener.done) - - listener.parser, _ = parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "udp_json_test", - }) - listener.wg.Add(1) - go listener.udpParser() - - in <- testmsg - listener.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "udp_json_test", - map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), - }) -} +// func TestRunParserJSONMsg(t *testing.T) { +// log.SetOutput(io.Discard) +// var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") + +// listener, in := newTestUDPListener() +// acc := testutil.Accumulator{} +// listener.acc = &acc +// defer close(listener.done) + +// listener.parser, _ = parsers.NewParser(&parsers.Config{ +// DataFormat: "json", +// MetricName: "udp_json_test", +// }) +// listener.wg.Add(1) +// go listener.udpParser() + +// in <- testmsg +// require.NoError(t, listener.Gather(&acc)) + +// acc.Wait(1) +// acc.AssertContainsFields(t, "udp_json_test", +// map[string]interface{}{ +// "a": float64(5), +// "b_c": float64(6), +// }) +// } diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index bb4ecde5860dd..72a9e4db5965d 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -12,28 +12,28 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) +type runner func(unbound Unbound) (*bytes.Buffer, error) // Unbound is used to store configuration values type Unbound struct { - Binary string - Timeout internal.Duration - 
UseSudo bool - Server string - ThreadAsTag bool - ConfigFile string - - filter filter.Filter - run runner + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` + Server string `toml:"server"` + ThreadAsTag bool `toml:"thread_as_tag"` + ConfigFile string `toml:"config_file"` + + run runner } var defaultBinary = "/usr/sbin/unbound-control" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## Address of server to connect to, read from unbound conf default, optionally ':port' @@ -71,26 +71,26 @@ func (s *Unbound) SampleConfig() string { } // Shell out to unbound_stat and return the output -func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) { +func unboundRunner(unbound Unbound) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if unbound.Server != "" { + host, port, err := net.SplitHostPort(unbound.Server) if err != nil { // No port was specified - host = Server + host = unbound.Server port = "" } // Unbound control requires an IP address, and we want to be nice to the user resolver := net.Resolver{} - ctx, lookUpCancel := context.WithTimeout(context.Background(), Timeout.Duration) + ctx, lookUpCancel := context.WithTimeout(context.Background(), time.Duration(unbound.Timeout)) defer lookUpCancel() serverIps, err := resolver.LookupIPAddr(ctx, host) if err != nil { - return nil, fmt.Errorf("error looking up ip for server: %s: %s", Server, err) + return nil, fmt.Errorf("error looking up ip for server: %s: %s", unbound.Server, err) } if len(serverIps) == 0 { - return nil, fmt.Errorf("error no ip for server: %s: %s", Server, err) + return nil, fmt.Errorf("error no ip for server: %s: %s", unbound.Server, err) } server := serverIps[0].IP.String() if port != "" { @@ -100,22 +100,22 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if unbound.ConfigFile != "" { + cmdArgs = append([]string{"-c", unbound.ConfigFile}, cmdArgs...) } - cmd := exec.Command(cmdName, cmdArgs...) + cmd := exec.Command(unbound.Binary, cmdArgs...) - if UseSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) + if unbound.UseSudo { + cmdArgs = append([]string{unbound.Binary}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout)) if err != nil { - return &out, fmt.Errorf("error running unbound-control: %s (%s %v)", err, cmdName, cmdArgs) + return &out, fmt.Errorf("error running unbound-control: %s (%s %v)", err, unbound.Binary, cmdArgs) } return &out, nil @@ -125,7 +125,6 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv // // All the dots in stat name will be replaced by underscores. Histogram statistics will not be collected.
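The refactor above, which collapses unboundRunner's six positional parameters into a single pass of the plugin struct, is also what the updated tests rely on: because `run` is a struct field, a test can swap in a stub that returns canned output instead of shelling out to unbound-control. A minimal sketch of the pattern; the stat line and binary path are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// runner mirrors the refactored signature: the whole config struct
// goes in, a buffer of command output comes out.
type runner func(u Unbound) (*bytes.Buffer, error)

type Unbound struct {
	Binary string
	run    runner
}

// realRunner shells out, roughly as the production code does.
func realRunner(u Unbound) (*bytes.Buffer, error) {
	out, err := exec.Command(u.Binary, "stats_noreset").Output()
	return bytes.NewBuffer(out), err
}

// stubRunner returns canned output, as the unit tests do.
func stubRunner(output string) runner {
	return func(Unbound) (*bytes.Buffer, error) {
		return bytes.NewBufferString(output), nil
	}
}

func main() {
	u := Unbound{
		Binary: "/usr/sbin/unbound-control",
		run:    stubRunner("total.num.queries=42\n"),
	}
	buf, err := u.run(u)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```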
func (s *Unbound) Gather(acc telegraf.Accumulator) error { - // Always exclude histogram statistics statExcluded := []string{"histogram.*"} filterExcluded, err := filter.Compile(statExcluded) @@ -133,7 +132,7 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { return err } - out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag, s.ConfigFile) + out, err := s.run(*s) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } @@ -144,7 +143,6 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { scanner := bufio.NewScanner(out) for scanner.Scan() { - cols := strings.Split(scanner.Text(), "=") // Check split correctness @@ -191,7 +189,6 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { field := strings.Replace(stat, ".", "_", -1) fields[field] = fieldValue } - } acc.AddFields("unbound", fields, nil) diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index cc4b99daecc59..d3900602441f1 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -3,17 +3,13 @@ package unbound import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool, ConfigFile string) func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { +func UnboundControl(output string) func(unbound Unbound) (*bytes.Buffer, error) { + return func(unbound Unbound) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,7 +17,7 @@ func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Serv func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", false, ""), + run: UnboundControl(fullOutput), } err := v.Gather(acc) @@ -38,7 +34,7 @@ func TestParseFullOutput(t *testing.T) { func TestParseFullOutputThreadAsTag(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", true, ""), + run: UnboundControl(fullOutput), ThreadAsTag: true, } err := v.Gather(acc) diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index b13a7b3e6c5d3..f536e4b27c44f 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -15,14 +15,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Uwsgi server struct type Uwsgi struct { - Servers []string `toml:"servers"` - Timeout internal.Duration `toml:"timeout"` + Servers []string `toml:"servers"` + Timeout config.Duration `toml:"timeout"` client *http.Client } @@ -51,7 +51,7 @@ func (u *Uwsgi) SampleConfig() string { func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { if u.client == nil { u.client = &http.Client{ - Timeout: u.Timeout.Duration, + Timeout: time.Duration(u.Timeout), } } wg := &sync.WaitGroup{} @@ -85,13 +85,13 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { switch url.Scheme { case "tcp": - r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration) + r, err = 
net.DialTimeout(url.Scheme, url.Host, time.Duration(u.Timeout)) if err != nil { return err } s.source = url.Host case "unix": - r, err = net.DialTimeout(url.Scheme, url.Path, u.Timeout.Duration) + r, err = net.DialTimeout(url.Scheme, url.Path, time.Duration(u.Timeout)) if err != nil { return err } @@ -210,14 +210,13 @@ func (u *Uwsgi) gatherCores(acc telegraf.Accumulator, s *StatsServer) { } acc.AddFields("uwsgi_cores", fields, tags) } - } } func init() { inputs.Add("uwsgi", func() telegraf.Input { return &Uwsgi{ - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/uwsgi/uwsgi_test.go b/plugins/inputs/uwsgi/uwsgi_test.go index 34581791e022f..80856c5cffa73 100644 --- a/plugins/inputs/uwsgi/uwsgi_test.go +++ b/plugins/inputs/uwsgi/uwsgi_test.go @@ -122,7 +122,7 @@ func TestBasic(t *testing.T) { Servers: []string{fakeServer.URL + "/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 0, len(acc.Errors)) } @@ -153,7 +153,7 @@ func TestInvalidJSON(t *testing.T) { Servers: []string{fakeServer.URL + "/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } @@ -162,7 +162,7 @@ func TestHttpError(t *testing.T) { Servers: []string{"http://novalidurladress/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } @@ -171,7 +171,7 @@ func TestTcpError(t *testing.T) { Servers: []string{"tcp://novalidtcpadress/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } @@ -180,6 +180,6 @@ func TestUnixSocketError(t *testing.T) { Servers: []string{"unix:///novalidunixsocket"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index 893f00c0a8cdd..d9872b9d81af7 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish @@ -12,12 +13,13 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) +type runner func(cmdName string, useSudo bool, instanceName string, timeout config.Duration) (*bytes.Buffer, error) // Varnish is used to store configuration values type Varnish struct { @@ -25,7 +27,7 @@ type Varnish struct { Binary string UseSudo bool InstanceName string - Timeout internal.Duration + Timeout config.Duration filter filter.Filter run runner @@ -33,7 +35,7 @@ type Varnish struct { var defaultStats = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"} var defaultBinary = "/usr/bin/varnishstat" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: @@ -66,16 +68,16 @@ func (s *Varnish) SampleConfig() string { } // Shell out to varnish_stat and return the output -func varnishRunner(cmdName string, UseSudo bool, 
InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) { +func varnishRunner(cmdName string, useSudo bool, instanceName string, timeout config.Duration) (*bytes.Buffer, error) { cmdArgs := []string{"-1"} - if InstanceName != "" { - cmdArgs = append(cmdArgs, []string{"-n", InstanceName}...) + if instanceName != "" { + cmdArgs = append(cmdArgs, []string{"-n", instanceName}...) } cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmdArgs = append([]string{"-n"}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) @@ -84,7 +86,7 @@ func varnishRunner(cmdName string, UseSudo bool, InstanceName string, Timeout in var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running varnishstat: %s", err) } @@ -149,8 +151,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { sectionMap[section][field], err = strconv.ParseUint(value, 10, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numeric value for %s = %v\n", - stat, value)) + acc.AddError(fmt.Errorf("expected a numeric value for %s = %v", stat, value)) } } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 96e5c35562208..088c08378c1ef 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish @@ -7,15 +8,15 @@ import ( "fmt" "strings" "testing" - "time" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) -func fakeVarnishStat(output string, useSudo bool, InstanceName string, Timeout internal.Duration) func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { - return func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { +func fakeVarnishStat(output string) func(string, bool, string, config.Duration) (*bytes.Buffer, error) { + return func(string, bool, string, config.Duration) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -23,10 +24,10 @@ func fakeVarnishStat(output string, useSudo bool, InstanceName string, Timeout i func TestGather(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(smOutput, false, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(smOutput), Stats: []string{"*"}, } - v.Gather(acc) + assert.NoError(t, v.Gather(acc)) acc.HasMeasurement("varnish") for tag, fields := range parsedSmOutput { @@ -39,12 +40,11 @@ func TestGather(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(fullOutput), Stats: []string{"*"}, } - err := v.Gather(acc) + assert.NoError(t, v.Gather(acc)) - assert.NoError(t, err) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) assert.Len(t, acc.Metrics, 6) @@ -54,12 +54,11 @@ func TestParseFullOutput(t *testing.T) { func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, false, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(fullOutput), Stats: []string{"MGT.*", "VBE.*"}, } - err := v.Gather(acc) + 
assert.NoError(t, v.Gather(acc)) - assert.NoError(t, err) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) assert.Len(t, acc.Metrics, 2) @@ -77,12 +76,11 @@ func TestFieldConfig(t *testing.T) { for fieldCfg, expected := range expect { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(fullOutput), Stats: strings.Split(fieldCfg, ","), } - err := v.Gather(acc) + assert.NoError(t, v.Gather(acc)) - assert.NoError(t, err) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) assert.Equal(t, expected, len(flat)) @@ -94,7 +92,10 @@ func flatten(metrics []*testutil.Metric) map[string]interface{} { for _, m := range metrics { buf := &bytes.Buffer{} for k, v := range m.Tags { - buf.WriteString(fmt.Sprintf("%s=%s", k, v)) + _, err := buf.WriteString(fmt.Sprintf("%s=%s", k, v)) + if err != nil { + return nil + } } for k, v := range m.Fields { flat[fmt.Sprintf("%s %s", buf.String(), k)] = v diff --git a/plugins/inputs/varnish/varnish_windows.go b/plugins/inputs/varnish/varnish_windows.go index 0c85c106f2b4f..9fed7dfc2a3c8 100644 --- a/plugins/inputs/varnish/varnish_windows.go +++ b/plugins/inputs/varnish/varnish_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package varnish diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 108637bab05d7..7d73ea7e35855 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -8,7 +8,10 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v * Datastores ## Supported versions of vSphere -This plugin supports vSphere version 5.5 through 6.7. + +This plugin supports vSphere versions 6.5, 6.7 and 7.0. It may work with versions 5.1, 5.5 and 6.0, but these are not officially supported. + +Compatibility information can be found [here](https://github.com/vmware/govmomi/tree/v0.26.0#compatibility). ## Configuration @@ -181,6 +184,12 @@ vm_metric_exclude = [ "*" ] ## preserve the full precision when averaging takes place. # use_int_samples = true + ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In + ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported + ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing + ## it too much may cause performance issues. + # metric_lookback = 3 + ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To @@ -276,7 +285,7 @@ We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop vCenter keeps two different kinds of metrics, known as realtime and historical metrics. * Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. -* Historical metrics: Available at a 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup.
These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. +* Historical metrics: Available by default at 5-minute, 30-minute, 2-hour and 24-hour rollup levels. The vSphere Telegraf plugin only uses the most granular rollup, which defaults to 5 minutes but can be changed in vCenter to other interval durations. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html @@ -309,7 +318,7 @@ This will disrupt the metric collection and can result in missed samples. The be [[inputs.vsphere]] interval = "300s" - + vcenters = [ "https://someaddress/sdk" ] username = "someuser@vsphere.local" password = "secret" @@ -349,6 +358,11 @@ The vSphere plugin allows you to specify two concurrency settings: While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues at the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500 and rounded up to the nearest integer. +### Configuring the historical_interval setting + +When the vSphere plugin queries vCenter for historical statistics, it queries for statistics that exist at a specific interval. The default historical interval duration is 5 minutes, but if this interval has been changed you must override the default query interval in the vSphere plugin. +* ```historical_interval```: The interval of the most granular statistics configured in vSphere, represented in seconds. + ## Measurements & Fields - Cluster Stats diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index b3096f7be300b..2795c94ae6698 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/influxdata/telegraf" "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" @@ -20,6 +19,8 @@ import ( "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + + "github.com/influxdata/telegraf" ) // The highest number of metrics we can query for, no matter what settings @@ -30,10 +31,10 @@ const absoluteMaxMetrics = 10000 // a single Client is reused across all functions and goroutines, but the client // is periodically recycled to avoid authentication expiration issues. type ClientFactory struct { - client *Client - mux sync.Mutex - url *url.URL - parent *VSphere + client *Client + mux sync.Mutex + vSphereURL *url.URL + parent *VSphere } // Client represents a connection to vSphere and is backed by a govmomi connection @@ -49,11 +50,11 @@ type Client struct { } // NewClientFactory creates a new ClientFactory and prepares it for use. 
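The `internal.Duration` to `config.Duration` change that recurs throughout this patch swaps a wrapper struct for a named `time.Duration`, so call sites use a plain type conversion instead of reading a `.Duration` field. A minimal sketch of the pattern, assuming `config.Duration` is declared as a named `time.Duration` (as in the telegraf config package):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Duration stands in for config.Duration: a named time.Duration.
type Duration time.Duration

func main() {
	timeout := Duration(60 * time.Second)

	// Old style: context.WithTimeout(ctx, timeout.Duration)
	// New style: a plain type conversion is enough.
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout))
	defer cancel()

	fmt.Println(ctx.Err()) // <nil> until the deadline passes
}
```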
-func NewClientFactory(ctx context.Context, url *url.URL, parent *VSphere) *ClientFactory { +func NewClientFactory(vSphereURL *url.URL, parent *VSphere) *ClientFactory { return &ClientFactory{ - client: nil, - parent: parent, - url: url, + client: nil, + parent: parent, + vSphereURL: vSphereURL, } } @@ -66,7 +67,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { for { if cf.client == nil { var err error - if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil { + if cf.client, err = NewClient(ctx, cf.vSphereURL, cf.parent); err != nil { return nil, err } } @@ -74,11 +75,11 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { // Execute a dummy call against the server to make sure the client is // still functional. If not, try to log back in. If that doesn't work, // we give up. - ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout)) defer cancel1() if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!") - ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout)) defer cancel2() if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil { if !retrying { @@ -98,8 +99,8 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { } // NewClient creates a new vSphere client based on the url and setting passed as parameters. -func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { - sw := NewStopwatch("connect", u.Host) +func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, error) { + sw := NewStopwatch("connect", vSphereURL.Host) defer sw.Stop() tlsCfg, err := vs.ClientConfig.TLSConfig() @@ -111,14 +112,14 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { tlsCfg = &tls.Config{} } if vs.Username != "" { - u.User = url.UserPassword(vs.Username, vs.Password) + vSphereURL.User = url.UserPassword(vs.Username, vs.Password) } - vs.Log.Debugf("Creating client: %s", u.Host) - soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify) + vs.Log.Debugf("Creating client: %s", vSphereURL.Host) + soapClient := soap.NewClient(vSphereURL, tlsCfg.InsecureSkipVerify) // Add certificate if we have it. Use it to log us in. - if tlsCfg != nil && len(tlsCfg.Certificates) > 0 { + if len(tlsCfg.Certificates) > 0 { soapClient.SetCertificate(tlsCfg.Certificates[0]) } @@ -130,7 +131,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { } } - ctx1, cancel1 := context.WithTimeout(ctx, vs.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) defer cancel1() vimClient, err := vim25.NewClient(ctx1, soapClient) if err != nil { @@ -140,7 +141,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { // If TSLKey is specified, try to log in as an extension using a cert. 
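`GetClient` above validates the cached session with a cheap server round-trip and re-authenticates at most once before giving up. A condensed, self-contained sketch of that keep-alive pattern; `probe` and `login` are hypothetical stand-ins for `methods.GetCurrentTime` and `SessionManager.Login`, not telegraf or govmomi APIs:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type session struct{ valid bool }

// probe mimics the cheap "is the session still alive?" server call.
func probe(ctx context.Context, s *session) error {
	if !s.valid {
		return errors.New("session expired")
	}
	return nil
}

// login mimics re-authentication against the server.
func login(ctx context.Context, s *session) error { s.valid = true; return nil }

// getSession returns a working session, re-logging in at most once.
func getSession(ctx context.Context, s *session) (*session, error) {
	retrying := false
	for {
		if err := probe(ctx, s); err != nil {
			if retrying {
				return nil, fmt.Errorf("renewing session: %w", err)
			}
			retrying = true
			if err := login(ctx, s); err != nil {
				return nil, err
			}
			continue // re-probe after logging back in
		}
		return s, nil
	}
}

func main() {
	s := &session{valid: false}
	if _, err := getSession(context.Background(), s); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("session ok")
}
```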
if vs.TLSKey != "" { - ctx2, cancel2 := context.WithTimeout(ctx, vs.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) defer cancel2() if err := sm.LoginExtensionByCertificate(ctx2, vs.TLSKey); err != nil { return nil, err @@ -154,13 +155,13 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { } // Only login if the URL contains user information. - if u.User != nil { - if err := c.Login(ctx, u.User); err != nil { + if vSphereURL.User != nil { + if err := c.Login(ctx, vSphereURL.User); err != nil { return nil, err } } - c.Timeout = vs.Timeout.Duration + c.Timeout = time.Duration(vs.Timeout) m := view.NewManager(c.Client) v, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{}, true) @@ -177,10 +178,10 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { Root: v, Perf: p, Valid: true, - Timeout: vs.Timeout.Duration, + Timeout: time.Duration(vs.Timeout), } // Adjust max query size if needed - ctx3, cancel3 := context.WithTimeout(ctx, vs.Timeout.Duration) + ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) defer cancel3() n, err := client.GetMaxQueryMetrics(ctx3) if err != nil { diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 6d77cb69dddca..9903647f8d4ee 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -29,15 +29,13 @@ var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$") var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$") -const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics - -const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics - const maxSampleConst = 10 // Absolute maximum number of samples regardless of period const maxMetadataSamples = 100 // Number of resources to sample for metric metadata -const hwMarkTTL = time.Duration(4 * time.Hour) +const maxRealtimeMetrics = 50000 // Absolute maximum metrics per realtime query + +const hwMarkTTL = 4 * time.Hour type queryChunk []types.PerfQuerySpec @@ -124,7 +122,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra hwMarks: NewTSCache(hwMarkTTL), lun2ds: make(map[string]string), initialized: false, - clientFactory: NewClientFactory(ctx, url, parent), + clientFactory: NewClientFactory(url, parent), customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), log: log, @@ -138,7 +136,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra parentTag: "", enabled: anythingEnabled(parent.DatacenterMetricExclude), realTime: false, - sampling: 300, + sampling: int32(time.Duration(parent.HistoricalInterval).Seconds()), objects: make(objectMap), filters: newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), paths: parent.DatacenterInclude, @@ -156,7 +154,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra parentTag: "dcname", enabled: anythingEnabled(parent.ClusterMetricExclude), realTime: false, - sampling: 300, + sampling: int32(time.Duration(parent.HistoricalInterval).Seconds()), objects: make(objectMap), filters: newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude), paths: parent.ClusterInclude, @@ -209,7 +207,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, 
url *url.URL, log telegra pKey: "dsname", enabled: anythingEnabled(parent.DatastoreMetricExclude), realTime: false, - sampling: 300, + sampling: int32(time.Duration(parent.HistoricalInterval).Seconds()), objects: make(objectMap), filters: newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), paths: parent.DatastoreInclude, @@ -258,7 +256,7 @@ func isSimple(include []string, exclude []string) bool { } func (e *Endpoint) startDiscovery(ctx context.Context) { - e.discoveryTicker = time.NewTicker(e.Parent.ObjectDiscoveryInterval.Duration) + e.discoveryTicker = time.NewTicker(time.Duration(e.Parent.ObjectDiscoveryInterval)) go func() { for { select { @@ -300,7 +298,7 @@ func (e *Endpoint) init(ctx context.Context) error { } } - if e.Parent.ObjectDiscoveryInterval.Duration > 0 { + if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 { e.Parent.Log.Debug("Running initial discovery") e.initalDiscovery(ctx) } @@ -308,7 +306,7 @@ func (e *Endpoint) init(ctx context.Context) error { return nil } -func (e *Endpoint) getMetricNameForId(id int32) string { +func (e *Endpoint) getMetricNameForID(id int32) string { e.metricNameMux.RLock() defer e.metricNameMux.RUnlock() return e.metricNameLookup[id] @@ -322,13 +320,13 @@ func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error { return err } - mn, err := client.CounterInfoByName(ctx) + mn, err := client.CounterInfoByKey(ctx) if err != nil { return err } e.metricNameLookup = make(map[int32]string) - for name, m := range mn { - e.metricNameLookup[m.Key] = name + for key, m := range mn { + e.metricNameLookup[key] = m.Name() } return nil } @@ -339,7 +337,7 @@ func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int return nil, err } - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling) if err != nil { @@ -367,7 +365,7 @@ func (e *Endpoint) getAncestorName(ctx context.Context, client *Client, resource path = append(path, here.Reference().String()) o := object.NewCommon(client.Client.Client, r) var result mo.ManagedEntity - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) if err != nil { @@ -429,7 +427,7 @@ func (e *Endpoint) discover(ctx context.Context) error { paths: res.paths, excludePaths: res.excludePaths} - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) objects, err := res.getObjects(ctx1, e, &rf) cancel1() if err != nil { @@ -437,7 +435,7 @@ func (e *Endpoint) discover(ctx context.Context) error { } // Fill in datacenter names where available (no need to do it for Datacenters) - if res.name != "Datacenter" { + if res.name != "datacenter" { for k, obj := range objects { if obj.parentRef != nil { obj.dcname, _ = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) @@ -468,8 +466,8 @@ func (e *Endpoint) discover(ctx context.Context) error { dss := newObjects["datastore"] l2d := make(map[string]string) for _, ds := range dss { - lunId := ds.altID - m := isolateLUN.FindStringSubmatch(lunId) + lunID := ds.altID + m := isolateLUN.FindStringSubmatch(lunID) if m != nil { l2d[m[1]] = ds.name } @@ -565,7 +563,7 @@ func (e *Endpoint) 
complexMetadataSelect(ctx context.Context, res *resourceKind, } else { m.Instance = "" } - if res.filters.Match(e.getMetricNameForId(m.CounterId)) { + if res.filters.Match(e.getMetricNameForID(m.CounterId)) { mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m } } @@ -588,7 +586,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.Datacenter - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -609,7 +607,7 @@ func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (o func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.ClusterComputeResource - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -623,7 +621,7 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje // We're not interested in the immediate parent (a folder), but the data center. p, ok := cache[r.Parent.Value] if !ok { - ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel2() client, err := e.clientFactory.GetClient(ctx2) if err != nil { @@ -631,7 +629,7 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje } o := object.NewFolder(client.Client.Client, *r.Parent) var folder mo.Folder - ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel3() err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) if err != nil { @@ -679,7 +677,7 @@ func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectM func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.VirtualMachine - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -710,7 +708,7 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap ips := make(map[string][]string) for _, ip := range net.IpConfig.IpAddress { addr := ip.IpAddress - for _, ipType := range e.Parent.IpAddresses { + for _, ipType := range e.Parent.IPAddresses { if !(ipType == "ipv4" && isIPv4.MatchString(addr) || ipType == "ipv6" && isIPv6.MatchString(addr)) { continue @@ -769,7 +767,7 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.Datastore - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -777,18 +775,18 @@ func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (ob } m := make(objectMap) for _, r := range resources { - lunId := "" + lunID := "" if r.Info != nil { 
info := r.Info.GetDatastoreInfo() if info != nil { - lunId = info.Url + lunID = info.Url } } m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, - altID: lunId, + altID: lunID, customValues: e.loadCustomAttributes(&r.ManagedEntity), } } @@ -825,7 +823,6 @@ func (e *Endpoint) Close() { // Collect runs a round of data collections as specified in the configuration. func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error { - // If we never managed to do a discovery, collection will be a no-op. Therefore, // we need to check that a connection is available, or the collection will // silently fail. @@ -841,7 +838,7 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error } // If discovery interval is disabled (0), discover on each collection cycle - if e.Parent.ObjectDiscoveryInterval.Duration == 0 { + if time.Duration(e.Parent.ObjectDiscoveryInterval) == 0 { err := e.discover(ctx) if err != nil { return err @@ -874,7 +871,7 @@ func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pq }) } -func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job queryJob) { +func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, job queryJob) { te := NewThrottledExecutor(e.Parent.CollectConcurrency) maxMetrics := e.Parent.MaxQueryMetrics if maxMetrics < 1 { @@ -889,20 +886,20 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim } pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects) + numQs := 0 for _, object := range res.objects { - timeBuckets := make(map[int64]*types.PerfQuerySpec, 0) + timeBuckets := make(map[int64]*types.PerfQuerySpec) for metricIdx, metric := range res.metrics { - // Determine time of last successful collection - metricName := e.getMetricNameForId(metric.CounterId) + metricName := e.getMetricNameForID(metric.CounterId) if metricName == "" { e.log.Infof("Unable to find metric name for id %d. Skipping!", metric.CounterId) continue } start, ok := e.hwMarks.Get(object.ref.Value, metricName) if !ok { - start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1)) + start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.Parent.MetricLookback) - 1)) } start = start.Truncate(20 * time.Second) // Truncate to maximum resolution @@ -924,9 +921,9 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Add this metric to the bucket bucket.MetricId = append(bucket.MetricId, metric) - // Bucket filled to capacity? (Only applies to non real time) + // Bucket filled to capacity? // OR if we're past the absolute maximum limit - if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > 100000 { + if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > maxRealtimeMetrics { e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d", len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects)) @@ -943,16 +940,18 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Handle data in time bucket and submit job if we've reached the maximum number of object. 
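The `chunkify` changes in this hunk make job submission account for the number of metrics queried (`numQs`), not just the number of query specs, flushing a partial job once the cap is exceeded. A simplified, runnable sketch of that batching logic; the `bucket` type and `flush` helper are illustrative stand-ins, not telegraf APIs:

```go
package main

import "fmt"

const maxMetricsPerJob = 5 // stands in for maxRealtimeMetrics (50000 in the patch)

type bucket struct{ metricIDs []int }

// flush stands in for submitChunkJob: it receives one batch of buckets.
func flush(batch []bucket, metrics int) {
	fmt.Printf("submitting %d buckets, %d metrics\n", len(batch), metrics)
}

func main() {
	buckets := []bucket{
		{metricIDs: []int{1, 2}},
		{metricIDs: []int{3, 4, 5}},
		{metricIDs: []int{6}},
	}

	var batch []bucket
	numQs := 0 // running metric count, mirroring the patch
	for _, b := range buckets {
		batch = append(batch, b)
		numQs += len(b.metricIDs)
		if numQs > maxMetricsPerJob {
			flush(batch, numQs)
			batch, numQs = nil, 0
		}
	}
	if len(batch) > 0 { // submit whatever is left over
		flush(batch, numQs)
	}
}
```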
for _, bucket := range timeBuckets { pqs = append(pqs, *bucket) - if (!res.realTime && len(pqs) > e.Parent.MaxQueryObjects) || len(pqs) > 100000 { - e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, len(bucket.MetricId)) + numQs += len(bucket.MetricId) + if (!res.realTime && numQs > e.Parent.MaxQueryObjects) || numQs > maxRealtimeMetrics { + e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, numQs) submitChunkJob(ctx, te, job, pqs) pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects) + numQs = 0 } } } // Submit any jobs left in the queue if len(pqs) > 0 { - e.log.Debugf("Submitting job for %s: %d objects", res.name, len(pqs)) + e.log.Debugf("Submitting job for %s: %d objects, %d metrics", res.name, len(pqs), numQs) submitChunkJob(ctx, te, job, pqs) } @@ -1014,9 +1013,9 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc latestSample := time.Time{} // Divide workload into chunks and process them concurrently - e.chunkify(ctx, res, now, latest, acc, + e.chunkify(ctx, res, now, latest, func(chunk queryChunk) { - n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) + n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, estInterval) e.log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) if err != nil { acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) @@ -1078,7 +1077,7 @@ func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, int return rInfo, rValues } -func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { +func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, interval time.Duration) (int, time.Time, error) { e.log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) latestSample := time.Time{} count := 0 diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index e49bf80f33fe5..8414ad8d81285 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -99,6 +99,8 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, if err != nil { return err } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer v.Destroy(ctx) var content []types.ObjectContent @@ -117,6 +119,8 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, if err != nil { return err } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer v2.Destroy(ctx) err = v2.Retrieve(ctx, []string{resType}, fields, &content) if err != nil { diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go index 1be75d7605173..c312260c85b9b 100644 --- a/plugins/inputs/vsphere/tscache.go +++ b/plugins/inputs/vsphere/tscache.go @@ -27,7 +27,7 @@ func (t *TSCache) Purge() { defer t.mux.Unlock() n := 0 for k, v := range t.table { - if time.Now().Sub(v) > t.ttl { + if time.Since(v) > t.ttl { delete(t.table, k) n++ } diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 9bafcd92113c3..f587ab6aaba95 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + 
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/vmware/govmomi/vim25/soap" @@ -47,15 +47,17 @@ type VSphere struct { CustomAttributeInclude []string CustomAttributeExclude []string UseIntSamples bool - IpAddresses []string + IPAddresses []string + MetricLookback int MaxQueryObjects int MaxQueryMetrics int CollectConcurrency int DiscoverConcurrency int ForceDiscoverOnInit bool - ObjectDiscoveryInterval internal.Duration - Timeout internal.Duration + ObjectDiscoveryInterval config.Duration + Timeout config.Duration + HistoricalInterval config.Duration endpoints []*Endpoint cancel context.CancelFunc @@ -237,12 +239,22 @@ var sampleConfig = ` # custom_attribute_include = [] # custom_attribute_exclude = ["*"] + ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In + ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported + ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing + ## it too much may cause performance issues. + # metric_lookback = 3 + ## Optional SSL Config # ssl_ca = "/path/to/cafile" # ssl_cert = "/path/to/certfile" # ssl_key = "/path/to/keyfile" ## Use SSL but skip chain & host verification # insecure_skip_verify = false + + ## The Historical Interval value must match EXACTLY the interval in the daily + # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals + # historical_interval = "5m" ` // SampleConfig returns a set of default configuration to be used as a boilerplate when setting up @@ -258,7 +270,7 @@ func (v *VSphere) Description() string { // Start is called from telegraf core when a plugin is started and allows it to // perform initialization tasks. -func (v *VSphere) Start(acc telegraf.Accumulator) error { +func (v *VSphere) Start(_ telegraf.Accumulator) error { v.Log.Info("Starting plugin") ctx, cancel := context.WithCancel(context.Background()) v.cancel = cancel @@ -315,7 +327,6 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error { defer wg.Done() err := endpoint.Collect(context.Background(), acc) if err == context.Canceled { - // No need to signal errors if we were merely canceled. 
err = nil } @@ -358,15 +369,17 @@ func init() { CustomAttributeInclude: []string{}, CustomAttributeExclude: []string{"*"}, UseIntSamples: true, - IpAddresses: []string{}, + IPAddresses: []string{}, MaxQueryObjects: 256, MaxQueryMetrics: 256, CollectConcurrency: 1, DiscoverConcurrency: 1, + MetricLookback: 3, ForceDiscoverOnInit: true, - ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, - Timeout: internal.Duration{Duration: time.Second * 60}, + ObjectDiscoveryInterval: config.Duration(time.Second * 300), + Timeout: config.Duration(time.Second * 60), + HistoricalInterval: config.Duration(time.Second * 300), } }) } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 20e26d293bece..31bb0fdf08844 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -11,7 +11,7 @@ import ( "time" "unsafe" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" itls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" @@ -147,12 +147,13 @@ func defaultVSphere() *VSphere { MaxQueryObjects: 256, MaxQueryMetrics: 256, - ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, - Timeout: internal.Duration{Duration: time.Second * 20}, + ObjectDiscoveryInterval: config.Duration(time.Second * 300), + Timeout: config.Duration(time.Second * 20), ForceDiscoverOnInit: true, DiscoverConcurrency: 1, CollectConcurrency: 1, Separator: ".", + HistoricalInterval: config.Duration(time.Second * 300), } } @@ -224,12 +225,16 @@ func TestParseConfig(t *testing.T) { v := VSphere{} c := v.SampleConfig() p := regexp.MustCompile("\n#") - fmt.Printf("Source=%s", p.ReplaceAllLiteralString(c, "\n")) c = configHeader + "\n[[inputs.vsphere]]\n" + p.ReplaceAllLiteralString(c, "\n") - fmt.Printf("Source=%s", c) tab, err := toml.Parse([]byte(c)) require.NoError(t, err) require.NotNil(t, tab) + +} + +func TestConfigDurationParsing(t *testing.T) { + v := defaultVSphere() + require.Equal(t, int32(300), int32(time.Duration(v.HistoricalInterval).Seconds()), "HistoricalInterval.Seconds() with default duration should resolve 300") } func TestMaxQuery(t *testing.T) { @@ -471,7 +476,6 @@ func testCollection(t *testing.T, excludeClusters bool) { v.Username = username v.Password = password } else { - // Don't run test on 32-bit machines due to bug in simulator. // https://github.com/vmware/govmomi/issues/1330 var i int @@ -513,16 +517,17 @@ func testCollection(t *testing.T, excludeClusters bool) { // We have to follow the host parent path to locate a cluster. Look up the host! 
finder := Finder{client} var hosts []mo.HostSystem - finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts) + err := finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts) + require.NoError(t, err) require.NotEmpty(t, hosts) hostMoid = hosts[0].Reference().Value hostCache[hostName] = hostMoid } - if isInCluster(t, v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster + if isInCluster(v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster mustContainAll(t, m.Tags, []string{"clustername"}) } } else if strings.HasPrefix(m.Measurement, "vsphere.host.") { - if isInCluster(t, v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster + if isInCluster(v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster mustContainAll(t, m.Tags, []string{"esxhostname", "clustername", "moid", "dcname"}) } else { mustContainAll(t, m.Tags, []string{"esxhostname", "moid", "dcname"}) @@ -536,7 +541,7 @@ func testCollection(t *testing.T, excludeClusters bool) { require.Empty(t, mustHaveMetrics, "Some metrics were not found") } -func isInCluster(t *testing.T, v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool { +func isInCluster(v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool { ctx := context.Background() ref := types.ManagedObjectReference{ Type: resourceKind, diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go index 19f8c0251bbb7..44def8c6f5141 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -2,7 +2,7 @@ package filestack import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulato func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go b/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go index 93f976f6074be..74d697b2cb0a6 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go @@ -5,7 +5,7 @@ import "strconv" type FilestackEvent struct { Action string `json:"action"` TimeStamp int64 `json:"timestamp"` - Id int `json:"id"` + ID int `json:"id"` } func (fe *FilestackEvent) Tags() map[string]string { @@ -16,6 +16,6 @@ func (fe *FilestackEvent) Tags() map[string]string { func (fe *FilestackEvent) Fields() map[string]interface{} { return map[string]interface{}{ - "id": strconv.Itoa(fe.Id), + "id": strconv.Itoa(fe.ID), } } diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 0bb792bf5df08..2d48cbef2e5f2 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -5,7 +5,7 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "io/ioutil" + "io" "log" "net/http" @@ -28,7 +28,7 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() eventType := 
r.Header.Get("X-Github-Event") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return @@ -126,7 +126,9 @@ func checkSignature(secret string, data []byte, signature string) bool { func generateSignature(secret string, data []byte) string { mac := hmac.New(sha1.New, []byte(secret)) - mac.Write(data) + if _, err := mac.Write(data); err != nil { + return err.Error() + } result := mac.Sum(nil) return "sha1=" + hex.EncodeToString(result) } diff --git a/plugins/inputs/webhooks/github/github_webhooks_models.go b/plugins/inputs/webhooks/github/github_webhooks_models.go index 4c15ac6c2907a..497d3f13c575e 100644 --- a/plugins/inputs/webhooks/github/github_webhooks_models.go +++ b/plugins/inputs/webhooks/github/github_webhooks_models.go @@ -2,7 +2,6 @@ package github import ( "fmt" - "log" "time" "github.com/influxdata/telegraf" @@ -107,10 +106,7 @@ func (s CommitCommentEvent) NewMetric() telegraf.Metric { "commit": s.Comment.Commit, "comment": s.Comment.Body, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -137,10 +133,7 @@ func (s CreateEvent) NewMetric() telegraf.Metric { "ref": s.Ref, "refType": s.RefType, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -167,10 +160,7 @@ func (s DeleteEvent) NewMetric() telegraf.Metric { "ref": s.Ref, "refType": s.RefType, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -198,10 +188,7 @@ func (s DeploymentEvent) NewMetric() telegraf.Metric { "environment": s.Deployment.Environment, "description": s.Deployment.Description, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -232,10 +219,7 @@ func (s DeploymentStatusEvent) NewMetric() telegraf.Metric { "depState": s.DeploymentStatus.State, "depDescription": s.DeploymentStatus.Description, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -260,10 +244,7 @@ func (s ForkEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "fork": s.Forkee.Repository, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -288,10 +269,7 @@ func (s GollumEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -320,10 +298,7 @@ func (s IssueCommentEvent) NewMetric() telegraf.Metric { "comments": s.Issue.Comments, "body": s.Comment.Body, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -352,10 +327,7 @@ func (s IssuesEvent) NewMetric() telegraf.Metric { "title": s.Issue.Title, "comments": s.Issue.Comments, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { 
- log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -381,10 +353,7 @@ func (s MemberEvent) NewMetric() telegraf.Metric { "newMember": s.Member.User, "newMemberStatus": s.Member.Admin, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -407,10 +376,7 @@ func (s MembershipEvent) NewMetric() telegraf.Metric { "newMember": s.Member.User, "newMemberStatus": s.Member.Admin, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -433,10 +399,7 @@ func (s PageBuildEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -459,10 +422,7 @@ func (s PublicEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -496,10 +456,7 @@ func (s PullRequestEvent) NewMetric() telegraf.Metric { "deletions": s.PullRequest.Deletions, "changedFiles": s.PullRequest.ChangedFiles, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -534,10 +491,7 @@ func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric { "commentFile": s.Comment.File, "comment": s.Comment.Comment, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -566,10 +520,7 @@ func (s PushEvent) NewMetric() telegraf.Metric { "before": s.Before, "after": s.After, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -594,10 +545,7 @@ func (s ReleaseEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "tagName": s.Release.TagName, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -620,10 +568,7 @@ func (s RepositoryEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -650,10 +595,7 @@ func (s StatusEvent) NewMetric() telegraf.Metric { "commit": s.Commit, "state": s.State, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -678,10 +620,7 @@ func (s TeamAddEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "teamName": s.Team.Name, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } @@ -704,9 +643,6 @@ func (s WatchEvent) NewMetric() telegraf.Metric { 
"forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m := metric.New(meas, t, f, time.Now()) return m } diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index 4a14c88947f97..67ba86908d1a1 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -2,7 +2,7 @@ package mandrill import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -25,13 +25,13 @@ func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator md.acc = acc } -func (md *MandrillWebhook) returnOK(w http.ResponseWriter, r *http.Request) { +func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) } func (md *MandrillWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go index b36b13e541eef..242130545a5ae 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go @@ -8,7 +8,7 @@ type Event interface { type MandrillEvent struct { EventName string `json:"event"` TimeStamp int64 `json:"ts"` - Id string `json:"_id"` + ID string `json:"_id"` } func (me *MandrillEvent) Tags() map[string]string { @@ -19,6 +19,6 @@ func (me *MandrillEvent) Tags() map[string]string { func (me *MandrillEvent) Fields() map[string]interface{} { return map[string]interface{}{ - "id": me.Id, + "id": me.ID, } } diff --git a/plugins/inputs/webhooks/papertrail/README.md b/plugins/inputs/webhooks/papertrail/README.md index a3463dcaa6f8b..3f9c33ec5320c 100644 --- a/plugins/inputs/webhooks/papertrail/README.md +++ b/plugins/inputs/webhooks/papertrail/README.md @@ -14,6 +14,23 @@ Events from Papertrail come in two forms: * Each point has a field counter (`count`), which is set to `1` (signifying the event occurred) * Each event "hostname" object is converted to a `host` tag * The "saved_search" name in the payload is added as an `event` tag + * The "saved_search" id in the payload is added as a `search_id` field + * The papertrail url to view the event is built and added as a `url` field + * The rest of the data in the event is converted directly to fields on the point: + * `id` + * `source_ip` + * `source_name` + * `source_id` + * `program` + * `severity` + * `facility` + * `message` + +When a callback is received, an event-based point will look similar to: + +``` +papertrail,host=myserver.example.com,event=saved_search_name count=1i,source_name="abc",program="CROND",severity="Info",source_id=2i,message="message body",source_ip="208.75.57.121",id=7711561783320576i,facility="Cron",url="https://papertrailapp.com/searches/42?centered_on_id=7711561783320576",search_id=42i 1453248892000000000 +``` * The [count-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#count-only-webhooks) @@ -22,10 +39,7 @@ Events from Papertrail come in two forms: * Each count "source_name" object is converted to a `host` tag * The "saved_search" name in the payload is added as an `event` tag -The current functionality is very basic, however 
this allows you to -track the number of events by host and saved search. - -When an event is received, any point will look similar to: +When a callback is received, a count-based point will look similar to: ``` papertrail,host=myserver.example.com,event=saved_search_name count=3i 1453248892000000000 diff --git a/plugins/inputs/webhooks/papertrail/papertrail_test.go b/plugins/inputs/webhooks/papertrail/papertrail_test.go index 14b8aec895c98..6cba6730c9486 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_test.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_test.go @@ -67,8 +67,32 @@ func TestEventPayload(t *testing.T) { resp := post(pt, contentType, form.Encode()) require.Equal(t, http.StatusOK, resp.Code) - fields := map[string]interface{}{ - "count": uint64(1), + fields1 := map[string]interface{}{ + "count": uint64(1), + "id": int64(7711561783320576), + "source_ip": "208.75.57.121", + "source_name": "abc", + "source_id": int64(2), + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "message body", + "url": "https://papertrailapp.com/searches/42?centered_on_id=7711561783320576", + "search_id": int64(42), + } + + fields2 := map[string]interface{}{ + "count": uint64(1), + "id": int64(7711562567655424), + "source_ip": "208.75.57.120", + "source_name": "server1", + "source_id": int64(19), + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "A short event", + "url": "https://papertrailapp.com/searches/42?centered_on_id=7711562567655424", + "search_id": int64(42), } tags1 := map[string]string{ @@ -80,8 +104,8 @@ func TestEventPayload(t *testing.T) { "host": "def", } - acc.AssertContainsTaggedFields(t, "papertrail", fields, tags1) - acc.AssertContainsTaggedFields(t, "papertrail", fields, tags2) + acc.AssertContainsTaggedFields(t, "papertrail", fields1, tags1) + acc.AssertContainsTaggedFields(t, "papertrail", fields2, tags2) } func TestCountPayload(t *testing.T) { diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go index 42453c1309d93..5aa8ecaf83fc2 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go @@ -2,6 +2,7 @@ package papertrail import ( "encoding/json" + "fmt" "log" "net/http" "time" @@ -41,7 +42,6 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request } if payload.Events != nil { - // Handle event-based payload for _, e := range payload.Events { // Warning: Duplicate event timestamps will overwrite each other @@ -50,13 +50,21 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request "event": payload.SavedSearch.Name, } fields := map[string]interface{}{ - "count": uint64(1), + "count": uint64(1), + "id": e.ID, + "source_ip": e.SourceIP, + "source_name": e.SourceName, + "source_id": int64(e.SourceID), + "program": e.Program, + "severity": e.Severity, + "facility": e.Facility, + "message": e.Message, + "url": fmt.Sprintf("%s?centered_on_id=%d", payload.SavedSearch.SearchURL, e.ID), + "search_id": payload.SavedSearch.ID, } pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt) } - } else if payload.Counts != nil { - // Handle count-based payload for _, c := range payload.Counts { for ts, count := range *c.TimeSeries { diff --git a/plugins/inputs/webhooks/particle/particle_webhooks.go b/plugins/inputs/webhooks/particle/particle_webhooks.go index aa3499935f49a..ad93ea7c56477 100644 --- 
a/plugins/inputs/webhooks/particle/particle_webhooks.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks.go @@ -14,7 +14,7 @@ type event struct { Data data `json:"data"` TTL int `json:"ttl"` PublishedAt string `json:"published_at"` - Database string `json:"measurement"` + Measurement string `json:"measurement"` } type data struct { @@ -59,6 +59,12 @@ func (rb *ParticleWebhook) eventHandler(w http.ResponseWriter, r *http.Request) pTime = time.Now() } - rb.acc.AddFields(e.Name, e.Data.Fields, e.Data.Tags, pTime) + // Use 'measurement' event field as the measurement, or default to the event name. + measurementName := e.Measurement + if measurementName == "" { + measurementName = e.Name + } + + rb.acc.AddFields(measurementName, e.Data.Fields, e.Data.Tags, pTime) w.WriteHeader(http.StatusOK) } diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_test.go index dc6213367dda9..c00d49fbb28a6 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks_test.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks_test.go @@ -44,7 +44,7 @@ func TestNewItem(t *testing.T) { "location": "TravelingWilbury", } - acc.AssertContainsTaggedFields(t, "temperature", fields, tags) + acc.AssertContainsTaggedFields(t, "mydata", fields, tags) } func TestUnknowItem(t *testing.T) { @@ -57,6 +57,50 @@ func TestUnknowItem(t *testing.T) { } } +func TestDefaultMeasurementName(t *testing.T) { + t.Parallel() + var acc testutil.Accumulator + rb := &ParticleWebhook{Path: "/particle", acc: &acc} + resp := postWebhooks(rb, BlankMeasurementJSON()) + if resp.Code != http.StatusOK { + t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) + } + + fields := map[string]interface{}{ + "temp_c": 26.680000, + } + + tags := map[string]string{ + "id": "230035001147343438323536", + } + + acc.AssertContainsTaggedFields(t, "eventName", fields, tags) +} + +func BlankMeasurementJSON() string { + return ` + { + "event": "eventName", + "data": { + "tags": { + "id": "230035001147343438323536" + }, + "values": { + "temp_c": 26.680000 + } + }, + "ttl": 60, + "published_at": "2017-09-28T21:54:10.897Z", + "coreid": "123456789938323536", + "userid": "1234ee123ac8e5ec1231a123d", + "version": 10, + "public": false, + "productID": 1234, + "name": "sensor", + "measurement": "" + }` +} + func NewItemJSON() string { return ` { diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index 55ff7eb2f3594..d9c1323cdd608 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -3,7 +3,7 @@ package rollbar import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (rb *RollbarWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go index b9a3a0713cc16..ad5c54a037ecd 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go @@ -17,9 +17,9 @@ type NewItemDataItemLastOccurence struct { } type NewItemDataItem struct { - Id 
int `json:"id"` + ID int `json:"id"` Environment string `json:"environment"` - ProjectId int `json:"project_id"` + ProjectID int `json:"project_id"` LastOccurence NewItemDataItemLastOccurence `json:"last_occurrence"` } @@ -36,7 +36,7 @@ func (ni *NewItem) Tags() map[string]string { return map[string]string{ "event": ni.EventName, "environment": ni.Data.Item.Environment, - "project_id": strconv.Itoa(ni.Data.Item.ProjectId), + "project_id": strconv.Itoa(ni.Data.Item.ProjectID), "language": ni.Data.Item.LastOccurence.Language, "level": ni.Data.Item.LastOccurence.Level, } @@ -44,7 +44,7 @@ func (ni *NewItem) Tags() map[string]string { func (ni *NewItem) Fields() map[string]interface{} { return map[string]interface{}{ - "id": ni.Data.Item.Id, + "id": ni.Data.Item.ID, } } @@ -54,9 +54,9 @@ type OccurrenceDataOccurrence struct { } type OccurrenceDataItem struct { - Id int `json:"id"` + ID int `json:"id"` Environment string `json:"environment"` - ProjectId int `json:"project_id"` + ProjectID int `json:"project_id"` } type OccurrenceData struct { @@ -73,7 +73,7 @@ func (o *Occurrence) Tags() map[string]string { return map[string]string{ "event": o.EventName, "environment": o.Data.Item.Environment, - "project_id": strconv.Itoa(o.Data.Item.ProjectId), + "project_id": strconv.Itoa(o.Data.Item.ProjectID), "language": o.Data.Occurrence.Language, "level": o.Data.Occurrence.Level, } @@ -81,14 +81,14 @@ func (o *Occurrence) Tags() map[string]string { func (o *Occurrence) Fields() map[string]interface{} { return map[string]interface{}{ - "id": o.Data.Item.Id, + "id": o.Data.Item.ID, } } type DeployDataDeploy struct { - Id int `json:"id"` + ID int `json:"id"` Environment string `json:"environment"` - ProjectId int `json:"project_id"` + ProjectID int `json:"project_id"` } type DeployData struct { @@ -104,12 +104,12 @@ func (ni *Deploy) Tags() map[string]string { return map[string]string{ "event": ni.EventName, "environment": ni.Data.Deploy.Environment, - "project_id": strconv.Itoa(ni.Data.Deploy.ProjectId), + "project_id": strconv.Itoa(ni.Data.Deploy.ProjectID), } } func (ni *Deploy) Fields() map[string]interface{} { return map[string]interface{}{ - "id": ni.Data.Deploy.Id, + "id": ni.Data.Deploy.ID, } } diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index 4baaf6ffb0463..a6f02beffd5d8 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -2,7 +2,6 @@ package webhooks import ( "fmt" - "log" "net" "net/http" "reflect" @@ -28,14 +27,16 @@ func init() { } type Webhooks struct { - ServiceAddress string + ServiceAddress string `toml:"service_address"` - Github *github.GithubWebhook - Filestack *filestack.FilestackWebhook - Mandrill *mandrill.MandrillWebhook - Rollbar *rollbar.RollbarWebhook - Papertrail *papertrail.PapertrailWebhook - Particle *particle.ParticleWebhook + Github *github.GithubWebhook `toml:"github"` + Filestack *filestack.FilestackWebhook `toml:"filestack"` + Mandrill *mandrill.MandrillWebhook `toml:"mandrill"` + Rollbar *rollbar.RollbarWebhook `toml:"rollbar"` + Papertrail *papertrail.PapertrailWebhook `toml:"papertrail"` + Particle *particle.ParticleWebhook `toml:"particle"` + + Log telegraf.Logger `toml:"-"` srv *http.Server } @@ -108,27 +109,27 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error { wb.srv = &http.Server{Handler: r} - ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress)) + ln, err := net.Listen("tcp", wb.ServiceAddress) if err != nil { - log.Fatalf("E! 
Error starting server: %v", err) - return err - + return fmt.Errorf("error starting server: %v", err) } go func() { if err := wb.srv.Serve(ln); err != nil { if err != http.ErrServerClosed { - acc.AddError(fmt.Errorf("E! Error listening: %v", err)) + acc.AddError(fmt.Errorf("error listening: %v", err)) } } }() - log.Printf("I! Started the webhooks service on %s\n", wb.ServiceAddress) + wb.Log.Infof("Started the webhooks service on %s", wb.ServiceAddress) return nil } -func (rb *Webhooks) Stop() { - rb.srv.Close() - log.Println("I! Stopping the Webhooks service") +func (wb *Webhooks) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive + wb.srv.Close() + wb.Log.Infof("Stopping the Webhooks service") } diff --git a/plugins/inputs/win_eventlog/README.md b/plugins/inputs/win_eventlog/README.md index e3c48656f79c9..97c5cdd79a8a6 100644 --- a/plugins/inputs/win_eventlog/README.md +++ b/plugins/inputs/win_eventlog/README.md @@ -72,6 +72,10 @@ Telegraf minimum version: Telegraf 1.16.0 ## Get only first line of Message field. For most events first line is usually more than enough only_first_line_of_message = true + ## Parse timestamp from TimeCreated.SystemTime event field. + ## Will default to current time of telegraf processing on parsing error or if set to false + timestamp_from_event = true + ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] @@ -79,7 +83,7 @@ Telegraf minimum version: Telegraf 1.16.0 event_fields = ["*"] ## Fields to exclude. Also applied to data fields. Globbing supported - exclude_fields = ["Binary", "Data_Address*"] + exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported exclude_empty = ["*ActivityID", "UserID"] @@ -154,6 +158,8 @@ Fields `Level`, `Opcode` and `Task` are converted to text and saved as computed `Message` field is rendered from the event data, and can be several kilobytes of text with line breaks. For most events the first line of this text is more then enough, and additional info is more useful to be parsed as XML fields. So, for brevity, plugin takes only the first line. You can set `only_first_line_of_message` parameter to `false` to take full message text. +The `TimeCreated` field is a string in RFC3339Nano format. By default Telegraf parses it as the event timestamp. If there is a field parse error or the `timestamp_from_event` configuration parameter is set to `false`, then the event timestamp will be set to the exact time when Telegraf parsed this event, so it will be rounded to the nearest minute. + ### Additional Fields The content of **Event Data** and **User Data** XML Nodes can be added as additional fields, and is added by default. You can disable that by setting `process_userdata` or `process_eventdata` parameters to `false`. 
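The `timestamp_from_event` behavior documented above parses the RFC3339Nano `TimeCreated` value and keeps the collection time when parsing fails or the option is off. A minimal sketch of that documented fallback; the `systemTime` string is a made-up example value:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical TimeCreated.SystemTime value from an event's XML.
	systemTime := "2021-09-28T21:54:10.8973173Z"

	timeStamp := time.Now() // default: Telegraf's own processing time
	if ts, err := time.Parse(time.RFC3339Nano, systemTime); err == nil {
		timeStamp = ts // use the event's own timestamp when it parses
	} else {
		fmt.Printf("Error parsing timestamp %q: %v\n", systemTime, err)
	}
	fmt.Println(timeStamp.UTC())
}
```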
diff --git a/plugins/inputs/win_eventlog/event.go b/plugins/inputs/win_eventlog/event.go index 2169ce8b490b3..86ddefdcb95e0 100644 --- a/plugins/inputs/win_eventlog/event.go +++ b/plugins/inputs/win_eventlog/event.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/syscall_windows.go b/plugins/inputs/win_eventlog/syscall_windows.go index df02913eee2af..d7bc07d0a5d42 100644 --- a/plugins/inputs/win_eventlog/syscall_windows.go +++ b/plugins/inputs/win_eventlog/syscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util.go b/plugins/inputs/win_eventlog/util.go index f085c3c055f5c..276e7514228e0 100644 --- a/plugins/inputs/win_eventlog/util.go +++ b/plugins/inputs/win_eventlog/util.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages @@ -100,7 +101,6 @@ func UnrollXMLFields(data []byte, fieldsUsage map[string]int, separator string) break } if err != nil { - // log.Fatal(err) break } var parents []string diff --git a/plugins/inputs/win_eventlog/util_test.go b/plugins/inputs/win_eventlog/util_test.go index ce7428dd391d2..1dc90cc2326d3 100644 --- a/plugins/inputs/win_eventlog/util_test.go +++ b/plugins/inputs/win_eventlog/util_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog.go b/plugins/inputs/win_eventlog/win_eventlog.go index 376ef4169d902..2ee303d483530 100644 --- a/plugins/inputs/win_eventlog/win_eventlog.go +++ b/plugins/inputs/win_eventlog/win_eventlog.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages @@ -13,6 +14,7 @@ import ( "reflect" "strings" "syscall" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -80,6 +82,10 @@ var sampleConfig = ` ## Get only first line of Message field. For most events first line is usually more than enough only_first_line_of_message = true + ## Parse timestamp from TimeCreated.SystemTime event field. + ## Will default to current time of telegraf processing on parsing error or if set to false + timestamp_from_event = true + ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] @@ -87,7 +93,7 @@ var sampleConfig = ` event_fields = ["*"] ## Fields to exclude. Also applied to data fields. Globbing supported - exclude_fields = ["Binary", "Data_Address*"] + exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] ## Skip those tags or fields if their value is empty or equals to zero. 
Globbing supported exclude_empty = ["*ActivityID", "UserID"] @@ -102,6 +108,7 @@ type WinEventLog struct { ProcessEventData bool `toml:"process_eventdata"` Separator string `toml:"separator"` OnlyFirstLineOfMessage bool `toml:"only_first_line_of_message"` + TimeStampFromEvent bool `toml:"timestamp_from_event"` EventTags []string `toml:"event_tags"` EventFields []string `toml:"event_fields"` ExcludeFields []string `toml:"exclude_fields"` @@ -157,6 +164,7 @@ loop: tags := map[string]string{} fields := map[string]interface{}{} evt := reflect.ValueOf(&event).Elem() + timeStamp := time.Now() // Walk through all fields of Event struct to process System tags or fields for i := 0; i < evt.NumField(); i++ { fieldName := evt.Type().Field(i).Name @@ -181,6 +189,12 @@ loop: case "TimeCreated": fieldValue = event.TimeCreated.SystemTime fieldType = reflect.TypeOf(fieldValue).String() + if w.TimeStampFromEvent { + timeStamp, err = time.Parse(time.RFC3339Nano, fmt.Sprintf("%v", fieldValue)) + if err != nil { + w.Log.Warnf("Error parsing timestamp %q: %v", fieldValue, err) + } + } case "Correlation": if should, _ := w.shouldProcessField("ActivityID"); should { activityID := event.Correlation.ActivityID @@ -258,7 +272,7 @@ loop: } // Pass collected metrics - acc.AddFields("win_eventlog", fields, tags) + acc.AddFields("win_eventlog", fields, tags, timeStamp) } } @@ -510,6 +524,7 @@ func init() { ProcessEventData: true, Separator: "_", OnlyFirstLineOfMessage: true, + TimeStampFromEvent: true, EventTags: []string{"Source", "EventID", "Level", "LevelText", "Keywords", "Channel", "Computer"}, EventFields: []string{"*"}, ExcludeEmpty: []string{"Task", "Opcode", "*ActivityID", "UserID"}, diff --git a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go index 005077aa64c7d..e78ad6133b367 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go +++ b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows //revive:disable-next-line:var-naming diff --git a/plugins/inputs/win_eventlog/win_eventlog_test.go b/plugins/inputs/win_eventlog/win_eventlog_test.go index 9f922431ed776..bd6a434f40088 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_test.go +++ b/plugins/inputs/win_eventlog/win_eventlog_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/zsyscall_windows.go b/plugins/inputs/win_eventlog/zsyscall_windows.go index 5c7b0a504b0bf..34c17471691e8 100644 --- a/plugins/inputs/win_eventlog/zsyscall_windows.go +++ b/plugins/inputs/win_eventlog/zsyscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index 1bb4bcb34a7ff..de45386a764a1 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -65,7 +65,7 @@ Example: _Deprecated. Necessary features on Windows Vista and newer are checked dynamically_ -Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatability. 
+Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatibility. It is recommended NOT to use this on OSes starting with Vista and newer because it requires more configuration to use this than the newer interface present since Vista. diff --git a/plugins/inputs/win_perf_counters/kernel32.go b/plugins/inputs/win_perf_counters/kernel32.go index 9cdadedc873bd..09cbd4be5f182 100644 --- a/plugins/inputs/win_perf_counters/kernel32.go +++ b/plugins/inputs/win_perf_counters/kernel32.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 3a24761b9d593..d4e5f14a1c267 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_386.go b/plugins/inputs/win_perf_counters/pdh_386.go index 134d15c8d1461..ec572db72447e 100644 --- a/plugins/inputs/win_perf_counters/pdh_386.go +++ b/plugins/inputs/win_perf_counters/pdh_386.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_amd64.go b/plugins/inputs/win_perf_counters/pdh_amd64.go index ff3b39335bcd4..1afedc317260e 100644 --- a/plugins/inputs/win_perf_counters/pdh_amd64.go +++ b/plugins/inputs/win_perf_counters/pdh_amd64.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index a59f96b84dc43..ab130a41dec3f 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -1,4 +1,5 @@ // Go API over pdh syscalls +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index bd130a3fd79e9..3a74e34a5228a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters @@ -9,7 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -143,7 +144,7 @@ type Win_PerfCounters struct { PreVistaSupport bool UsePerfCounterTime bool Object []perfobject - CountersRefreshInterval internal.Duration + CountersRefreshInterval config.Duration UseWildcardsExpansion bool Log telegraf.Logger @@ -345,7 +346,7 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { // Parse the config once var err error - if m.lastRefreshed.IsZero() || (m.CountersRefreshInterval.Duration.Nanoseconds() > 0 && m.lastRefreshed.Add(m.CountersRefreshInterval.Duration).Before(time.Now())) { + if m.lastRefreshed.IsZero() || (m.CountersRefreshInterval > 0 && m.lastRefreshed.Add(time.Duration(m.CountersRefreshInterval)).Before(time.Now())) { if m.counters != nil { m.counters = m.counters[:0] } @@ -386,46 +387,36 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { // 
collect if m.UseWildcardsExpansion { value, err := m.query.GetFormattedCounterValueDouble(metric.counterHandle) - if err == nil { - addCounterMeasurement(metric, metric.instance, value, collectFields) - } else { + if err != nil { //ignore invalid data as some counters from process instances returns this sometimes if !isKnownCounterDataError(err) { return fmt.Errorf("error while getting value for counter %s: %v", metric.counterPath, err) } + m.Log.Warnf("error while getting value for counter %q, will skip metric: %v", metric.counterPath, err) + continue } + addCounterMeasurement(metric, metric.instance, value, collectFields) } else { counterValues, err := m.query.GetFormattedCounterArrayDouble(metric.counterHandle) - if err == nil { - for _, cValue := range counterValues { - var add bool - if metric.includeTotal { - // If IncludeTotal is set, include all. - add = true - } else if metric.instance == "*" && !strings.Contains(cValue.InstanceName, "_Total") { - // Catch if set to * and that it is not a '*_Total*' instance. - add = true - } else if metric.instance == cValue.InstanceName { - // Catch if we set it to total or some form of it - add = true - } else if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, cValue.InstanceName) { - // If you are using a multiple instance identifier such as "w3wp#1" - // phd.dll returns only the first 2 characters of the identifier. - add = true - cValue.InstanceName = metric.instance - } else if metric.instance == "------" { - add = true - } - - if add { - addCounterMeasurement(metric, cValue.InstanceName, cValue.Value, collectFields) - } - } - } else { - //ignore invalid data as some counters from process instances returns this sometimes + if err != nil { + //ignore invalid data as some counters from process instances returns this sometimes if !isKnownCounterDataError(err) { return fmt.Errorf("error while getting value for counter %s: %v", metric.counterPath, err) } + m.Log.Warnf("error while getting value for counter %q, will skip metric: %v", metric.counterPath, err) + continue + } + for _, cValue := range counterValues { + + if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, cValue.InstanceName) { + // If you are using a multiple instance identifier such as "w3wp#1" + // phd.dll returns only the first 2 characters of the identifier. + cValue.InstanceName = metric.instance + } + + if shouldIncludeMetric(metric, cValue) { + addCounterMeasurement(metric, cValue.InstanceName, cValue.Value, collectFields) + } } } } @@ -443,6 +434,25 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { return nil } +func shouldIncludeMetric(metric *counter, cValue CounterValue) bool { + if metric.includeTotal { + // If IncludeTotal is set, include all. + return true + } + if metric.instance == "*" && !strings.Contains(cValue.InstanceName, "_Total") { + // Catch if set to * and that it is not a '*_Total*' instance. 
+ return true + } + if metric.instance == cValue.InstanceName { + // Catch if we set it to total or some form of it + return true + } + if metric.instance == "------" { + return true + } + return false +} + func addCounterMeasurement(metric *counter, instanceName string, value float64, collectFields map[instanceGrouping]map[string]interface{}) { measurement := sanitizedChars.Replace(metric.measurement) if measurement == "" { @@ -457,6 +467,7 @@ func addCounterMeasurement(metric *counter, instanceName string, value float64, func isKnownCounterDataError(err error) bool { if pdhErr, ok := err.(*PdhError); ok && (pdhErr.ErrorCode == PDH_INVALID_DATA || + pdhErr.ErrorCode == PDH_CALC_NEGATIVE_DENOMINATOR || pdhErr.ErrorCode == PDH_CALC_NEGATIVE_VALUE || pdhErr.ErrorCode == PDH_CSTATUS_INVALID_DATA || pdhErr.ErrorCode == PDH_NO_DATA) { @@ -467,6 +478,6 @@ func isKnownCounterDataError(err error) bool { func init() { inputs.Add("win_perf_counters", func() telegraf.Input { - return &Win_PerfCounters{query: &PerformanceQueryImpl{}, CountersRefreshInterval: internal.Duration{Duration: time.Second * 60}} + return &Win_PerfCounters{query: &PerformanceQueryImpl{}, CountersRefreshInterval: config.Duration(time.Second * 60)} }) } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 78917c2f2261f..a5ae58370ab4a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters @@ -8,13 +9,14 @@ import ( "testing" "time" + "strings" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "strings" ) -func TestWinPerformanceQueryImpl(t *testing.T) { +func TestWinPerformanceQueryImplIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -116,7 +118,7 @@ func TestWinPerformanceQueryImpl(t *testing.T) { } -func TestWinPerfcountersConfigGet1(t *testing.T) { +func TestWinPerfcountersConfigGet1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -150,7 +152,7 @@ func TestWinPerfcountersConfigGet1(t *testing.T) { require.NoError(t, err) } -func TestWinPerfcountersConfigGet2(t *testing.T) { +func TestWinPerfcountersConfigGet2Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -196,7 +198,7 @@ func TestWinPerfcountersConfigGet2(t *testing.T) { } } -func TestWinPerfcountersConfigGet3(t *testing.T) { +func TestWinPerfcountersConfigGet3Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -245,7 +247,7 @@ func TestWinPerfcountersConfigGet3(t *testing.T) { } } -func TestWinPerfcountersConfigGet4(t *testing.T) { +func TestWinPerfcountersConfigGet4Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -294,7 +296,7 @@ func TestWinPerfcountersConfigGet4(t *testing.T) { } } -func TestWinPerfcountersConfigGet5(t *testing.T) { +func TestWinPerfcountersConfigGet5Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -342,7 +344,7 @@ func TestWinPerfcountersConfigGet5(t *testing.T) { } } -func TestWinPerfcountersConfigGet6(t *testing.T) { +func TestWinPerfcountersConfigGet6Integration(t 
*testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -376,7 +378,7 @@ func TestWinPerfcountersConfigGet6(t *testing.T) { require.NoError(t, err) } -func TestWinPerfcountersConfigGet7(t *testing.T) { +func TestWinPerfcountersConfigGet7Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -424,7 +426,7 @@ func TestWinPerfcountersConfigGet7(t *testing.T) { } } -func TestWinPerfcountersConfigError1(t *testing.T) { +func TestWinPerfcountersConfigError1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -458,7 +460,7 @@ func TestWinPerfcountersConfigError1(t *testing.T) { require.Error(t, err) } -func TestWinPerfcountersConfigError2(t *testing.T) { +func TestWinPerfcountersConfigError2Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -494,7 +496,7 @@ func TestWinPerfcountersConfigError2(t *testing.T) { require.Error(t, err) } -func TestWinPerfcountersConfigError3(t *testing.T) { +func TestWinPerfcountersConfigError3Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -528,7 +530,7 @@ func TestWinPerfcountersConfigError3(t *testing.T) { require.Error(t, err) } -func TestWinPerfcountersCollect1(t *testing.T) { +func TestWinPerfcountersCollect1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -572,7 +574,7 @@ func TestWinPerfcountersCollect1(t *testing.T) { } } -func TestWinPerfcountersCollect2(t *testing.T) { +func TestWinPerfcountersCollect2Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go index 427f5d5461ff3..00af92b722552 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index a11f0ace8da3a..969b518d0f2b0 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters @@ -8,7 +9,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -734,7 +735,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { Object: perfObjects, UseWildcardsExpansion: true, query: fpm, - CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}, + CountersRefreshInterval: config.Duration(time.Second * 10), } var acc1 testutil.Accumulator err = m.Gather(&acc1) @@ -791,7 +792,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) acc2.AssertDoesNotContainsTaggedFields(t, measurement, fields3, tags3) - time.Sleep(m.CountersRefreshInterval.Duration) + time.Sleep(time.Duration(m.CountersRefreshInterval)) var acc3 testutil.Accumulator err = m.Gather(&acc3) 
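Several hunks in this file (and in x509_cert below) migrate from `internal.Duration`, a struct wrapping a `time.Duration`, to `config.Duration`, a named type over `time.Duration`, so call sites switch from a field access to a plain type conversion. A minimal sketch of the new form, assuming `config.Duration` is defined as a `time.Duration` alias as these hunks imply (the local `Duration` type stands in for the real package type):

```go
package main

import (
	"fmt"
	"time"
)

// Duration mirrors the shape of telegraf's config.Duration: a named
// type over time.Duration rather than a struct wrapping one.
type Duration time.Duration

type plugin struct {
	CountersRefreshInterval Duration
}

func main() {
	p := plugin{CountersRefreshInterval: Duration(60 * time.Second)}
	// Old style: p.CountersRefreshInterval.Duration (struct field access).
	// New style: a plain conversion back to time.Duration.
	interval := time.Duration(p.CountersRefreshInterval)
	fmt.Println(interval > 0, interval)
}
```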
@@ -827,7 +828,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { Object: perfObjects, UseWildcardsExpansion: false, query: fpm, - CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} + CountersRefreshInterval: config.Duration(time.Second * 10)} var acc1 testutil.Accumulator err = m.Gather(&acc1) assert.Len(t, m.counters, 2) @@ -902,7 +903,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { fpm.Open() - time.Sleep(m.CountersRefreshInterval.Duration) + time.Sleep(time.Duration(m.CountersRefreshInterval)) var acc3 testutil.Accumulator err = m.Gather(&acc3) diff --git a/plugins/inputs/win_services/README.md b/plugins/inputs/win_services/README.md index eef641718b965..1d7aa63568949 100644 --- a/plugins/inputs/win_services/README.md +++ b/plugins/inputs/win_services/README.md @@ -8,10 +8,11 @@ Monitoring some services may require running Telegraf with administrator privile ```toml [[inputs.win_services]] - ## Names of the services to monitor. Leave empty to monitor all the available services on the host + ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. service_names = [ "LanmanServer", "TermService", + "Win*", ] ``` diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 6ac1bde68ca20..38f873a99284d 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services @@ -7,6 +8,7 @@ import ( "os" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" @@ -78,10 +80,11 @@ func (rmr *MgProvider) Connect() (WinServiceManager, error) { } var sampleConfig = ` - ## Names of the services to monitor. Leave empty to monitor all the available services on the host + ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. service_names = [ "LanmanServer", - "TermService", + "TermService", + "Win*", ] ` @@ -93,6 +96,8 @@ type WinServices struct { ServiceNames []string `toml:"service_names"` mgrProvider ManagerProvider + + servicesFilter filter.Filter } type ServiceInfo struct { @@ -102,6 +107,16 @@ type ServiceInfo struct { StartUpMode int } +func (m *WinServices) Init() error { + var err error + m.servicesFilter, err = filter.NewIncludeExcludeFilter(m.ServiceNames, nil) + if err != nil { + return err + } + + return nil +} + func (m *WinServices) Description() string { return description } @@ -117,7 +132,7 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error { } defer scmgr.Disconnect() - serviceNames, err := listServices(scmgr, m.ServiceNames) + serviceNames, err := m.listServices(scmgr) if err != nil { return err } @@ -152,16 +167,20 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error { } // listServices returns a list of services to gather. 
-func listServices(scmgr WinServiceManager, userServices []string) ([]string, error) { - if len(userServices) != 0 { - return userServices, nil - } - +func (m *WinServices) listServices(scmgr WinServiceManager) ([]string, error) { names, err := scmgr.ListServices() if err != nil { return nil, fmt.Errorf("Could not list services: %s", err) } - return names, nil + + var services []string + for _, n := range names { + if m.servicesFilter.Match(n) { + services = append(services, n) + } + } + + return services, nil } // collectServiceInfo gathers info about a service. diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 0c375c3dd2e65..3c831642a01cf 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows //these tests must be run under administrator account @@ -13,7 +14,7 @@ import ( var InvalidServices = []string{"XYZ1@", "ZYZ@", "SDF_@#"} var KnownServices = []string{"LanmanServer", "TermService"} -func TestList(t *testing.T) { +func TestListIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -22,14 +23,18 @@ func TestList(t *testing.T) { require.NoError(t, err) defer scmgr.Disconnect() - services, err := listServices(scmgr, KnownServices) + winServices := &WinServices{ + ServiceNames: KnownServices, + } + winServices.Init() + services, err := winServices.listServices(scmgr) require.NoError(t, err) require.Len(t, services, 2, "Different number of services") require.Equal(t, services[0], KnownServices[0]) require.Equal(t, services[1], KnownServices[1]) } -func TestEmptyList(t *testing.T) { +func TestEmptyListIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -38,12 +43,16 @@ func TestEmptyList(t *testing.T) { require.NoError(t, err) defer scmgr.Disconnect() - services, err := listServices(scmgr, []string{}) + winServices := &WinServices{ + ServiceNames: []string{}, + } + winServices.Init() + services, err := winServices.listServices(scmgr) require.NoError(t, err) require.Condition(t, func() bool { return len(services) > 20 }, "Too few service") } -func TestGatherErrors(t *testing.T) { +func TestGatherErrorsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -52,6 +61,7 @@ func TestGatherErrors(t *testing.T) { ServiceNames: InvalidServices, mgrProvider: &MgProvider{}, } + ws.Init() require.Len(t, ws.ServiceNames, 3, "Different number of services") var acc testutil.Accumulator require.NoError(t, ws.Gather(&acc)) diff --git a/plugins/inputs/win_services/win_services_notwindows.go b/plugins/inputs/win_services/win_services_notwindows.go index 062c11cfc8eed..aa2f3534ca74d 100644 --- a/plugins/inputs/win_services/win_services_notwindows.go +++ b/plugins/inputs/win_services/win_services_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_services diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index e33ab2ddce622..69a75372dd086 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services @@ -123,35 +124,50 @@ var testErrors = []testData{ {nil, errors.New("Fake srv query error"), nil, "Fake service 2", 
"", 0, 0}, {nil, nil, errors.New("Fake srv config error"), "Fake service 3", "", 0, 0}, }}, - {nil, nil, nil, []serviceTestInfo{ + {[]string{"Fake service 1"}, nil, nil, []serviceTestInfo{ {errors.New("Fake srv open error"), nil, nil, "Fake service 1", "", 0, 0}, }}, } func TestBasicInfo(t *testing.T) { - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[0]}, + } + winServices.Init() assert.NotEmpty(t, winServices.SampleConfig()) assert.NotEmpty(t, winServices.Description()) } func TestMgrErrors(t *testing.T) { //mgr.connect error - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[0]}, + } var acc1 testutil.Accumulator err := winServices.Gather(&acc1) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) ////mgr.listServices error - winServices = &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[1]}} + winServices = &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[1]}, + } var acc2 testutil.Accumulator err = winServices.Gather(&acc2) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) ////mgr.listServices error 2 - winServices = &WinServices{testutil.Logger{}, []string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}} + winServices = &WinServices{ + Log: testutil.Logger{}, + ServiceNames: []string{"Fake service 1"}, + mgrProvider: &FakeMgProvider{testErrors[3]}, + } + winServices.Init() var acc3 testutil.Accumulator buf := &bytes.Buffer{} @@ -162,7 +178,11 @@ func TestMgrErrors(t *testing.T) { } func TestServiceErrors(t *testing.T) { - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[2]}} + winServices := &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[2]}, + } + winServices.Init() var acc1 testutil.Accumulator buf := &bytes.Buffer{} @@ -184,8 +204,13 @@ var testSimpleData = []testData{ }}, } -func TestGather2(t *testing.T) { - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testSimpleData[0]}} +func TestGatherContainsTag(t *testing.T) { + winServices := &WinServices{ + Log: testutil.Logger{}, + ServiceNames: []string{"Service*"}, + mgrProvider: &FakeMgProvider{testSimpleData[0]}, + } + winServices.Init() var acc1 testutil.Accumulator require.NoError(t, winServices.Gather(&acc1)) assert.Len(t, acc1.Errors, 0, "There should be no errors after gather") diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 75890a7901074..29a0250d92b7f 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -1,10 +1,10 @@ +//go:build linux // +build linux package wireless import ( "bytes" - "io/ioutil" "log" "os" "path" @@ -46,7 +46,7 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { w.loadPath() wirelessPath := path.Join(w.HostProc, "net", "wireless") - table, err := ioutil.ReadFile(wirelessPath) + table, err := os.ReadFile(wirelessPath) if err != nil { return err } diff --git a/plugins/inputs/wireless/wireless_notlinux.go b/plugins/inputs/wireless/wireless_notlinux.go index 4769acc970e42..435559ca58529 100644 --- a/plugins/inputs/wireless/wireless_notlinux.go +++ b/plugins/inputs/wireless/wireless_notlinux.go @@ -1,3 +1,4 
@@ +//go:build !linux // +build !linux package wireless diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index 6c562887e54db..20c10de88a347 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package wireless diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 760813ecc7adb..5211c38e9a9c2 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -3,19 +3,26 @@ This plugin provides information about X509 certificate accessible via local file or network connection. +When using a UDP address as a certificate source, the server must support [DTLS](https://en.wikipedia.org/wiki/Datagram_Transport_Layer_Security). + ### Configuration ```toml # Reads metrics from a SSL certificate [[inputs.x509_cert]] - ## List certificate sources - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org:443"] + ## List certificate sources, support wildcard expands for files + ## Prefix your entry with 'file://' if you intend to use relative paths + sources = ["tcp://example.org:443", "https://influxdata.com:443", + "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", + "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] ## Timeout for SSL connection # timeout = "5s" - ## Pass a different name into the TLS request (Server Name Indication) + ## Pass a different name into the TLS request (Server Name Indication). + ## This is synonymous with tls_server_name, and only one of the two + ## options may be specified at one time. ## example: server_name = "myhost.example.org" # server_name = "myhost.example.org" @@ -23,6 +30,7 @@ file or network connection. # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + # tls_server_name = "myhost.example.org" ``` diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 983926af16aeb..3486f2779eb2b 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -7,21 +7,28 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" "net" "net/url" + "os" + "path/filepath" "strings" "time" + "github.com/pion/dtls/v2" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/globpath" _tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` ## List certificate sources - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] + ## Prefix your entry with 'file://' if you intend to use relative paths + sources = ["tcp://example.org:443", "https://influxdata.com:443", + "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", + "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] ## Timeout for SSL connection # timeout = "5s" @@ -39,11 +46,14 @@ const description = "Reads metrics from a SSL certificate" // X509Cert holds the configuration of the plugin. 
type X509Cert struct { - Sources []string `toml:"sources"` - Timeout internal.Duration `toml:"timeout"` - ServerName string `toml:"server_name"` + Sources []string `toml:"sources"` + Timeout config.Duration `toml:"timeout"` + ServerName string `toml:"server_name"` tlsCfg *tls.Config _tls.ClientConfig + locations []*url.URL + globpaths []*globpath.GlobPath + Log telegraf.Logger } // Description returns description of the plugin. @@ -56,43 +66,108 @@ func (c *X509Cert) SampleConfig() string { return sampleConfig } -func (c *X509Cert) locationToURL(location string) (*url.URL, error) { - if strings.HasPrefix(location, "/") { - location = "file://" + location +func (c *X509Cert) sourcesToURLs() error { + for _, source := range c.Sources { + if strings.HasPrefix(source, "file://") || + strings.HasPrefix(source, "/") { + source = filepath.ToSlash(strings.TrimPrefix(source, "file://")) + g, err := globpath.Compile(source) + if err != nil { + return fmt.Errorf("could not compile glob %v: %v", source, err) + } + c.globpaths = append(c.globpaths, g) + } else { + if strings.Index(source, ":\\") == 1 { + source = "file://" + filepath.ToSlash(source) + } + u, err := url.Parse(source) + if err != nil { + return fmt.Errorf("failed to parse cert location - %s", err.Error()) + } + c.locations = append(c.locations, u) + } } - u, err := url.Parse(location) - if err != nil { - return nil, fmt.Errorf("failed to parse cert location - %s", err.Error()) - } + return nil +} - return u, nil +func (c *X509Cert) serverName(u *url.URL) (string, error) { + if c.tlsCfg.ServerName != "" { + if c.ServerName != "" { + return "", fmt.Errorf("both server_name (%q) and tls_server_name (%q) are set, but they are mutually exclusive", c.ServerName, c.tlsCfg.ServerName) + } + return c.tlsCfg.ServerName, nil + } + if c.ServerName != "" { + return c.ServerName, nil + } + return u.Hostname(), nil } func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) { + protocol := u.Scheme switch u.Scheme { - case "https": - u.Scheme = "tcp" - fallthrough case "udp", "udp4", "udp6": + ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout) + if err != nil { + return nil, err + } + defer ipConn.Close() + + serverName, err := c.serverName(u) + if err != nil { + return nil, err + } + + dtlsCfg := &dtls.Config{ + InsecureSkipVerify: true, + Certificates: c.tlsCfg.Certificates, + RootCAs: c.tlsCfg.RootCAs, + ServerName: serverName, + } + conn, err := dtls.Client(ipConn, dtlsCfg) + if err != nil { + return nil, err + } + defer conn.Close() + + rawCerts := conn.ConnectionState().PeerCertificates + var certs []*x509.Certificate + for _, rawCert := range rawCerts { + parsed, err := x509.ParseCertificate(rawCert) + if err != nil { + return nil, err + } + + if parsed != nil { + certs = append(certs, parsed) + } + } + + return certs, nil + case "https": + protocol = "tcp" fallthrough case "tcp", "tcp4", "tcp6": - ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout) + ipConn, err := net.DialTimeout(protocol, u.Host, timeout) if err != nil { return nil, err } defer ipConn.Close() - if c.ServerName == "" { - c.tlsCfg.ServerName = u.Hostname() - } else { - c.tlsCfg.ServerName = c.ServerName + serverName, err := c.serverName(u) + if err != nil { + return nil, err } + c.tlsCfg.ServerName = serverName c.tlsCfg.InsecureSkipVerify = true conn := tls.Client(ipConn, c.tlsCfg) defer conn.Close() + // reset SNI between requests + defer func() { c.tlsCfg.ServerName = "" }() + hsErr := conn.Handshake() if hsErr != nil { return 
nil, hsErr @@ -102,7 +177,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return certs, nil case "file": - content, err := ioutil.ReadFile(u.Path) + content, err := os.ReadFile(u.Path) if err != nil { return nil, err } @@ -120,7 +195,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica } certs = append(certs, cert) } - if rest == nil || len(rest) == 0 { + if len(rest) == 0 { break } content = rest @@ -187,25 +262,45 @@ func getTags(cert *x509.Certificate, location string) map[string]string { return tags } +func (c *X509Cert) collectCertURLs() ([]*url.URL, error) { + var urls []*url.URL + + for _, path := range c.globpaths { + files := path.Match() + if len(files) <= 0 { + c.Log.Errorf("could not find file: %v", path) + continue + } + for _, file := range files { + file = "file://" + file + u, err := url.Parse(file) + if err != nil { + return urls, fmt.Errorf("failed to parse cert location - %s", err.Error()) + } + urls = append(urls, u) + } + } + + return urls, nil +} + // Gather adds metrics into the accumulator. func (c *X509Cert) Gather(acc telegraf.Accumulator) error { now := time.Now() + collectedUrls, err := c.collectCertURLs() + if err != nil { + acc.AddError(fmt.Errorf("cannot get file: %s", err.Error())) + } - for _, location := range c.Sources { - u, err := c.locationToURL(location) - if err != nil { - acc.AddError(err) - return nil - } - - certs, err := c.getCert(u, c.Timeout.Duration*time.Second) + for _, location := range append(c.locations, collectedUrls...) { + certs, err := c.getCert(location, time.Duration(c.Timeout)) if err != nil { acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) } for i, cert := range certs { fields := getFields(cert, now) - tags := getTags(cert, location) + tags := getTags(cert, location.String()) // The first certificate is the leaf/end-entity certificate which needs DNS // name validation against the URL hostname. @@ -214,10 +309,9 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, } if i == 0 { - if c.ServerName == "" { - opts.DNSName = u.Hostname() - } else { - opts.DNSName = c.ServerName + opts.DNSName, err = c.serverName(location) + if err != nil { + return err } for j, cert := range certs { if j != 0 { @@ -247,6 +341,11 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { } func (c *X509Cert) Init() error { + err := c.sourcesToURLs() + if err != nil { + return err + } + tlsCfg, err := c.ClientConfig.TLSConfig() if err != nil { return err @@ -255,6 +354,14 @@ func (c *X509Cert) Init() error { tlsCfg = &tls.Config{} } + if tlsCfg.ServerName != "" && c.ServerName == "" { + // Save SNI from tlsCfg.ServerName to c.ServerName and reset tlsCfg.ServerName. 
+ // We need to reset c.tlsCfg.ServerName for each certificate when there's + // no explicit SNI (c.tlsCfg.ServerName or c.ServerName) otherwise we'll always (re)use + // first uri HostName for all certs (see issue 8914) + c.ServerName = tlsCfg.ServerName + tlsCfg.ServerName = "" + } c.tlsCfg = tlsCfg return nil @@ -264,7 +371,7 @@ func init() { inputs.Add("x509_cert", func() telegraf.Input { return &X509Cert{ Sources: []string{}, - Timeout: internal.Duration{Duration: 5}, + Timeout: config.Duration(5 * time.Second), // set default timeout to 5s } }) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index c3452445739f1..f0b0379109749 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,17 +4,23 @@ import ( "crypto/tls" "encoding/base64" "fmt" - "io/ioutil" "math/big" + "net" + "net/url" "os" + "path/filepath" + "runtime" "testing" "time" + "github.com/pion/dtls/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + _tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -23,21 +29,16 @@ var pki = testutil.NewPKI("../../../testutil/pki") // Make sure X509Cert implements telegraf.Input var _ telegraf.Input = &X509Cert{} -func TestGatherRemote(t *testing.T) { - if testing.Short() { - t.Skip("Skipping network-dependent test in short mode.") - } +func TestGatherRemoteIntegration(t *testing.T) { + t.Skip("Skipping network-dependent test due to race condition when test-all") - tmpfile, err := ioutil.TempFile("", "example") - if err != nil { - t.Fatal(err) - } + tmpfile, err := os.CreateTemp("", "example") + require.NoError(t, err) defer os.Remove(tmpfile.Name()) - if _, err := tmpfile.Write([]byte(pki.ReadServerCert())); err != nil { - t.Fatal(err) - } + _, err = tmpfile.Write([]byte(pki.ReadServerCert())) + require.NoError(t, err) tests := []struct { name string @@ -51,7 +52,7 @@ func TestGatherRemote(t *testing.T) { {name: "wrong port", server: ":99999", error: true}, {name: "no server", timeout: 5}, {name: "successful https", server: "https://example.org:443", timeout: 5}, - {name: "successful file", server: "file://" + tmpfile.Name(), timeout: 5}, + {name: "successful file", server: "file://" + filepath.ToSlash(tmpfile.Name()), timeout: 5}, {name: "unsupported scheme", server: "foo://", timeout: 5, error: true}, {name: "no certificate", timeout: 5, unset: true, error: true}, {name: "closed connection", close: true, error: true}, @@ -59,11 +60,9 @@ func TestGatherRemote(t *testing.T) { } pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - config := &tls.Config{ + cfg := &tls.Config{ InsecureSkipVerify: true, Certificates: []tls.Certificate{pair}, } @@ -71,36 +70,30 @@ func TestGatherRemote(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.unset { - config.Certificates = nil - config.GetCertificate = func(i *tls.ClientHelloInfo) (*tls.Certificate, error) { + cfg.Certificates = nil + cfg.GetCertificate = func(i *tls.ClientHelloInfo) (*tls.Certificate, error) { return nil, nil } } - ln, err := tls.Listen("tcp", ":0", config) - if err != nil { - t.Fatal(err) - } + ln, err := tls.Listen("tcp", ":0", cfg) + require.NoError(t, err) defer ln.Close() go func() { 
sconn, err := ln.Accept() - if err != nil { - return - } + require.NoError(t, err) if test.close { sconn.Close() } - serverConfig := config.Clone() + serverConfig := cfg.Clone() srv := tls.Server(sconn, serverConfig) if test.noshake { srv.Close() } - if err := srv.Handshake(); err != nil { - return - } + require.NoError(t, srv.Handshake()) }() if test.server == "" { @@ -109,9 +102,9 @@ func TestGatherRemote(t *testing.T) { sc := X509Cert{ Sources: []string{test.server}, - Timeout: internal.Duration{Duration: test.timeout}, + Timeout: config.Duration(test.timeout), } - sc.Init() + require.NoError(t, sc.Init()) sc.InsecureSkipVerify = true testErr := false @@ -156,42 +149,29 @@ func TestGatherLocal(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") - if err != nil { - t.Fatal(err) - } + f, err := os.CreateTemp("", "x509_cert") + require.NoError(t, err) _, err = f.Write([]byte(test.content)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - err = f.Chmod(test.mode) - if err != nil { - t.Fatal(err) + if runtime.GOOS != "windows" { + require.NoError(t, f.Chmod(test.mode)) } - err = f.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Close()) defer os.Remove(f.Name()) sc := X509Cert{ Sources: []string{f.Name()}, } - sc.Init() - - error := false + require.NoError(t, sc.Init()) acc := testutil.Accumulator{} err = sc.Gather(&acc) - if len(acc.Errors) > 0 { - error = true - } - if error != test.error { + if (len(acc.Errors) > 0) != test.error { t.Errorf("%s", err) } }) @@ -201,31 +181,23 @@ func TestGatherLocal(t *testing.T) { func TestTags(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) - f, err := ioutil.TempFile("", "x509_cert") - if err != nil { - t.Fatal(err) - } + f, err := os.CreateTemp("", "x509_cert") + require.NoError(t, err) _, err = f.Write([]byte(cert)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - err = f.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Close()) defer os.Remove(f.Name()) sc := X509Cert{ Sources: []string{f.Name()}, } - sc.Init() + require.NoError(t, sc.Init()) acc := testutil.Accumulator{} - err = sc.Gather(&acc) - require.NoError(t, err) + require.NoError(t, sc.Gather(&acc)) assert.True(t, acc.HasMeasurement("x509_cert")) @@ -266,47 +238,63 @@ func TestGatherChain(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") - if err != nil { - t.Fatal(err) - } + f, err := os.CreateTemp("", "x509_cert") + require.NoError(t, err) _, err = f.Write([]byte(test.content)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - err = f.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Close()) defer os.Remove(f.Name()) sc := X509Cert{ Sources: []string{f.Name()}, } - sc.Init() - - error := false + require.NoError(t, sc.Init()) acc := testutil.Accumulator{} err = sc.Gather(&acc) - if err != nil { - error = true - } - - if error != test.error { + if (err != nil) != test.error { t.Errorf("%s", err) } }) } +} + +func TestGatherUDPCert(t *testing.T) { + pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) + require.NoError(t, err) + + cfg := &dtls.Config{ + Certificates: []tls.Certificate{pair}, + } + + addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0} + listener, err := dtls.Listen("udp", addr, cfg) + require.NoError(t, err) + defer listener.Close() + go 
func() { + _, _ = listener.Accept() + }() + + m := &X509Cert{ + Sources: []string{"udp://" + listener.Addr().String()}, + Log: testutil.Logger{}, + } + require.NoError(t, m.Init()) + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + + assert.Len(t, acc.Errors, 0) + assert.True(t, acc.HasMeasurement("x509_cert")) } func TestStrings(t *testing.T) { sc := X509Cert{} - sc.Init() + require.NoError(t, sc.Init()) tests := []struct { name string @@ -327,7 +315,7 @@ func TestStrings(t *testing.T) { } } -func TestGatherCert(t *testing.T) { +func TestGatherCertIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -335,11 +323,73 @@ func TestGatherCert(t *testing.T) { m := &X509Cert{ Sources: []string{"https://www.influxdata.com:443"}, } - m.Init() + require.NoError(t, m.Init()) var acc testutil.Accumulator - err := m.Gather(&acc) - require.NoError(t, err) + require.NoError(t, m.Gather(&acc)) assert.True(t, acc.HasMeasurement("x509_cert")) } + +func TestGatherCertMustNotTimeout(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + duration := time.Duration(15) * time.Second + m := &X509Cert{ + Sources: []string{"https://www.influxdata.com:443"}, + Timeout: config.Duration(duration), + } + require.NoError(t, m.Init()) + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + require.Empty(t, acc.Errors) + assert.True(t, acc.HasMeasurement("x509_cert")) +} + +func TestSourcesToURLs(t *testing.T) { + m := &X509Cert{ + Sources: []string{"https://www.influxdata.com:443", "tcp://influxdata.com:443", "file:///dummy_test_path_file.pem", "/tmp/dummy_test_path_glob*.pem"}, + } + require.NoError(t, m.Init()) + + assert.Equal(t, len(m.globpaths), 2) + assert.Equal(t, len(m.locations), 2) +} + +func TestServerName(t *testing.T) { + tests := []struct { + name string + fromTLS string + fromCfg string + url string + expected string + err bool + }{ + {name: "in cfg", fromCfg: "example.com", url: "https://other.example.com", expected: "example.com"}, + {name: "in tls", fromTLS: "example.com", url: "https://other.example.com", expected: "example.com"}, + {name: "from URL", url: "https://other.example.com", expected: "other.example.com"}, + {name: "errors", fromCfg: "otherex.com", fromTLS: "example.com", url: "https://other.example.com", err: true}, + } + + for _, elt := range tests { + test := elt + t.Run(test.name, func(t *testing.T) { + sc := &X509Cert{ + ServerName: test.fromCfg, + ClientConfig: _tls.ClientConfig{ServerName: test.fromTLS}, + } + require.NoError(t, sc.Init()) + u, err := url.Parse(test.url) + require.NoError(t, err) + actual, err := sc.serverName(u) + if test.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.expected, actual) + }) + } +} diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index f0e71a47d714c..77b101915bbe6 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -2,9 +2,9 @@ This ZFS plugin provides metrics from your ZFS filesystems. It supports ZFS on Linux and FreeBSD. It gets ZFS stat from `/proc/spl/kstat/zfs` on Linux and -from `sysctl` and `zpool` on FreeBSD. +from `sysctl`, 'zfs' and `zpool` on FreeBSD. -### Configuration: +## Configuration ```toml [[inputs.zfs]] @@ -22,11 +22,14 @@ from `sysctl` and `zpool` on FreeBSD. 
## By default, don't gather zpool stats # poolMetrics = false + + ## By default, don't gather dataset stats + # datasetMetrics = false ``` -### Measurements & Fields: +### Measurements & Fields -By default this plugin collects metrics about ZFS internals and pool. +By default this plugin collects metrics about ZFS internals, pool, and dataset. These metrics are either counters or measure sizes in bytes. These metrics will be in the `zfs` measurement with the field names listed below. @@ -34,6 +37,9 @@ names listed below. If `poolMetrics` is enabled then additional metrics will be gathered for each pool. +If `datasetMetrics` is enabled then additional metrics will be gathered for +each dataset. + - zfs With fields listed below. @@ -183,44 +189,57 @@ each pool. On Linux (reference: kstat accumulated time and queue length statistics): - zfs_pool - - nread (integer, bytes) - - nwritten (integer, bytes) - - reads (integer, count) - - writes (integer, count) - - wtime (integer, nanoseconds) - - wlentime (integer, queuelength * nanoseconds) - - wupdate (integer, timestamp) - - rtime (integer, nanoseconds) - - rlentime (integer, queuelength * nanoseconds) - - rupdate (integer, timestamp) - - wcnt (integer, count) - - rcnt (integer, count) + - nread (integer, bytes) + - nwritten (integer, bytes) + - reads (integer, count) + - writes (integer, count) + - wtime (integer, nanoseconds) + - wlentime (integer, queuelength * nanoseconds) + - wupdate (integer, timestamp) + - rtime (integer, nanoseconds) + - rlentime (integer, queuelength * nanoseconds) + - rupdate (integer, timestamp) + - wcnt (integer, count) + - rcnt (integer, count) On FreeBSD: - zfs_pool - - allocated (integer, bytes) - - capacity (integer, bytes) - - dedupratio (float, ratio) - - free (integer, bytes) - - size (integer, bytes) - - fragmentation (integer, percent) + - allocated (integer, bytes) + - capacity (integer, bytes) + - dedupratio (float, ratio) + - free (integer, bytes) + - size (integer, bytes) + - fragmentation (integer, percent) + +#### Dataset Metrics (optional, only on FreeBSD) -### Tags: +- zfs_dataset + - avail (integer, bytes) + - used (integer, bytes) + - usedsnap (integer, bytes) + - usedds (integer, bytes) + +### Tags - ZFS stats (`zfs`) will have the following tag: - - pools - A `::` concatenated list of all ZFS pools on the machine. + - pools - A `::` concatenated list of all ZFS pools on the machine. + - datasets - A `::` concatenated list of all ZFS datasets on the machine. - Pool metrics (`zfs_pool`) will have the following tag: - - pool - with the name of the pool which the metrics are for. - - health - the health status of the pool. (FreeBSD only) + - pool - with the name of the pool which the metrics are for. + - health - the health status of the pool. (FreeBSD only) -### Example Output: +- Dataset metrics (`zfs_dataset`) will have the following tag: + - dataset - with the name of the dataset which the metrics are for.
-``` +### Example Output + +```shell $ ./telegraf --config telegraf.conf --input-filter zfs --test * Plugin: zfs, Collection 1 > zfs_pool,health=ONLINE,pool=zroot allocated=1578590208i,capacity=2i,dedupratio=1,fragmentation=1i,free=64456531968i,size=66035122176i 1464473103625653908 +> zfs_dataset,dataset=zata avail=10741741326336,used=8564135526400,usedsnap=0,usedds=90112 > zfs,pools=zroot arcstats_allocated=4167764i,arcstats_anon_evictable_data=0i,arcstats_anon_evictable_metadata=0i,arcstats_anon_size=16896i,arcstats_arc_meta_limit=10485760i,arcstats_arc_meta_max=115269568i,arcstats_arc_meta_min=8388608i,arcstats_arc_meta_used=51977456i,arcstats_c=16777216i,arcstats_c_max=41943040i,arcstats_c_min=16777216i,arcstats_data_size=0i,arcstats_deleted=1699340i,arcstats_demand_data_hits=14836131i,arcstats_demand_data_misses=2842945i,arcstats_demand_hit_predictive_prefetch=0i,arcstats_demand_metadata_hits=1655006i,arcstats_demand_metadata_misses=830074i,arcstats_duplicate_buffers=0i,arcstats_duplicate_buffers_size=0i,arcstats_duplicate_reads=123i,arcstats_evict_l2_cached=0i,arcstats_evict_l2_eligible=332172623872i,arcstats_evict_l2_ineligible=6168576i,arcstats_evict_l2_skip=0i,arcstats_evict_not_enough=12189444i,arcstats_evict_skip=195190764i,arcstats_hash_chain_max=2i,arcstats_hash_chains=10i,arcstats_hash_collisions=43134i,arcstats_hash_elements=2268i,arcstats_hash_elements_max=6136i,arcstats_hdr_size=565632i,arcstats_hits=16515778i,arcstats_l2_abort_lowmem=0i,arcstats_l2_asize=0i,arcstats_l2_cdata_free_on_write=0i,arcstats_l2_cksum_bad=0i,arcstats_l2_compress_failures=0i,arcstats_l2_compress_successes=0i,arcstats_l2_compress_zeros=0i,arcstats_l2_evict_l1cached=0i,arcstats_l2_evict_lock_retry=0i,arcstats_l2_evict_reading=0i,arcstats_l2_feeds=0i,arcstats_l2_free_on_write=0i,arcstats_l2_hdr_size=0i,arcstats_l2_hits=0i,arcstats_l2_io_error=0i,arcstats_l2_misses=0i,arcstats_l2_read_bytes=0i,arcstats_l2_rw_clash=0i,arcstats_l2_size=0i,arcstats_l2_write_buffer_bytes_scanned=0i,arcstats_l2_write_buffer_iter=0i,arcstats_l2_write_buffer_list_iter=0i,arcstats_l2_write_buffer_list_null_iter=0i,arcstats_l2_write_bytes=0i,arcstats_l2_write_full=0i,arcstats_l2_write_in_l2=0i,arcstats_l2_write_io_in_progress=0i,arcstats_l2_write_not_cacheable=380i,arcstats_l2_write_passed_headroom=0i,arcstats_l2_write_pios=0i,arcstats_l2_write_spa_mismatch=0i,arcstats_l2_write_trylock_fail=0i,arcstats_l2_writes_done=0i,arcstats_l2_writes_error=0i,arcstats_l2_writes_lock_retry=0i,arcstats_l2_writes_sent=0i,arcstats_memory_throttle_count=0i,arcstats_metadata_size=17014784i,arcstats_mfu_evictable_data=0i,arcstats_mfu_evictable_metadata=16384i,arcstats_mfu_ghost_evictable_data=5723648i,arcstats_mfu_ghost_evictable_metadata=10709504i,arcstats_mfu_ghost_hits=1315619i,arcstats_mfu_ghost_size=16433152i,arcstats_mfu_hits=7646611i,arcstats_mfu_size=305152i,arcstats_misses=3676993i,arcstats_mru_evictable_data=0i,arcstats_mru_evictable_metadata=0i,arcstats_mru_ghost_evictable_data=0i,arcstats_mru_ghost_evictable_metadata=80896i,arcstats_mru_ghost_hits=324250i,arcstats_mru_ghost_size=80896i,arcstats_mru_hits=8844526i,arcstats_mru_size=16693248i,arcstats_mutex_miss=354023i,arcstats_other_size=34397040i,arcstats_p=4172800i,arcstats_prefetch_data_hits=0i,arcstats_prefetch_data_misses=0i,arcstats_prefetch_metadata_hits=24641i,arcstats_prefetch_metadata_misses=3974i,arcstats_size=51977456i,arcstats_sync_wait_for_async=0i,vdev_cache_stats_delegations=779i,vdev_cache_stats_hits=323123i,vdev_cache_stats_misses=59929i,zfetchstats_hits=0i,zfetchstats
_max_streams=0i,zfetchstats_misses=0i 1464473103634124908 ``` @@ -268,8 +287,9 @@ A short description for some of the metrics. `arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2. Reasons could be: - - We have multiple pools, we evicted something from a pool without an l2 device. - - The zfs property secondary cache. + +- We have multiple pools, we evicted something from a pool without an l2 device. +- The zfs property secondary cache. `arcstats_c` Arc target size, this is the size the system thinks the arc should have. @@ -294,6 +314,7 @@ A short description for some of the metrics. `zfetchstats_stride_hits` Counts the number of cache hits, to items which are in the cache because of the prefetcher (prefetched stride reads) #### Vdev Cache Stats (FreeBSD only) + note: the vdev cache is deprecated in some ZFS implementations `vdev_cache_stats_hits` Hits to the vdev (device level) cache. @@ -301,6 +322,7 @@ note: the vdev cache is deprecated in some ZFS implementations `vdev_cache_stats_misses` Misses to the vdev (device level) cache. #### ABD Stats (Linux Only) + ABD is a linear/scatter dual typed buffer for ARC `abdstats_linear_cnt` number of linear ABDs which are currently allocated @@ -324,6 +346,7 @@ ABD is a linear/scatter dual typed buffer for ARC `fm_erpt-dropped` counts when an error report cannot be created (eg available memory is too low) #### ZIL (Linux Only) + note: ZIL measurements are system-wide, neither per-pool nor per-dataset `zil_commit_count` counts when ZFS transactions are committed to a ZIL diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go index 8e6bec4644932..4e1999cf68d30 100644 --- a/plugins/inputs/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -1,14 +1,22 @@ package zfs +import ( + "github.com/influxdata/telegraf" +) + type Sysctl func(metric string) ([]string, error) type Zpool func() ([]string, error) +type Zdataset func(properties []string) ([]string, error) type Zfs struct { - KstatPath string - KstatMetrics []string - PoolMetrics bool - sysctl Sysctl - zpool Zpool + KstatPath string + KstatMetrics []string + PoolMetrics bool + DatasetMetrics bool + sysctl Sysctl //nolint:varcheck,unused // False positive - this var is used for non-default build tag: freebsd + zpool Zpool //nolint:varcheck,unused // False positive - this var is used for non-default build tag: freebsd + zdataset Zdataset //nolint:varcheck,unused // False positive - this var is used for non-default build tag: freebsd + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -24,6 +32,8 @@ var sampleConfig = ` # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] ## By default, don't gather zpool stats # poolMetrics = false + ## By default, don't gather zdataset stats + # datasetMetrics = false ` func (z *Zfs) SampleConfig() string { @@ -31,5 +41,5 @@ func (z *Zfs) SampleConfig() string { } func (z *Zfs) Description() string { - return "Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools" + return "Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets" } diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 51c20682e832b..e493e3fc9a0bb 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs @@ -87,6 +88,47 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { return strings.Join(pools, "::"), nil } +func (z *Zfs) 
gatherDatasetStats(acc telegraf.Accumulator) (string, error) { + properties := []string{"name", "avail", "used", "usedsnap", "usedds"} + + lines, err := z.zdataset(properties) + if err != nil { + return "", err + } + + datasets := []string{} + for _, line := range lines { + col := strings.Split(line, "\t") + + datasets = append(datasets, col[0]) + } + + if z.DatasetMetrics { + for _, line := range lines { + col := strings.Split(line, "\t") + if len(col) != len(properties) { + z.Log.Warnf("Invalid number of columns for line: %s", line) + continue + } + + tags := map[string]string{"dataset": col[0]} + fields := map[string]interface{}{} + + for i, key := range properties[1:] { + value, err := strconv.ParseInt(col[i+1], 10, 64) + if err != nil { + return "", fmt.Errorf("Error parsing %s %q: %s", key, col[i+1], err) + } + fields[key] = value + } + + acc.AddFields("zfs_dataset", fields, tags) + } + } + + return strings.Join(datasets, "::"), nil +} + func (z *Zfs) Gather(acc telegraf.Accumulator) error { kstatMetrics := z.KstatMetrics if len(kstatMetrics) == 0 { @@ -99,6 +141,11 @@ func (z *Zfs) Gather(acc telegraf.Accumulator) error { return err } tags["pools"] = poolNames + datasetNames, err := z.gatherDatasetStats(acc) + if err != nil { + return err + } + tags["datasets"] = datasetNames fields := make(map[string]interface{}) for _, metric := range kstatMetrics { @@ -127,8 +174,11 @@ func run(command string, args ...string) ([]string, error) { stdout := strings.TrimSpace(outbuf.String()) stderr := strings.TrimSpace(errbuf.String()) - if _, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s error: %s", command, stderr) + if err != nil { + if _, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("%s error: %s", command, stderr) + } + return nil, fmt.Errorf("%s error: %s", command, err) } return strings.Split(stdout, "\n"), nil } @@ -137,6 +187,10 @@ func zpool() ([]string, error) { return run("zpool", []string{"list", "-Hp", "-o", "name,health,size,alloc,free,fragmentation,capacity,dedupratio"}...) } +func zdataset(properties []string) ([]string, error) { + return run("zfs", []string{"list", "-Hp", "-o", strings.Join(properties, ",")}...) +} + func sysctl(metric string) ([]string, error) { return run("sysctl", []string{"-q", fmt.Sprintf("kstat.zfs.misc.%s", metric)}...) 
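For reference, `gatherDatasetStats` above reduces to splitting the tab-separated `zfs list -Hp` output and integer-parsing every column after the dataset name. A minimal standalone sketch of that parsing step (the sample line is copied from the `zdataset_output` test fixture below; this is an illustration, not the plugin code itself):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One tab-separated line, as produced by:
	//   zfs list -Hp -o name,avail,used,usedsnap,usedds
	line := "zata\t10741741326336\t8564135526400\t0\t90112"
	properties := []string{"name", "avail", "used", "usedsnap", "usedds"}

	col := strings.Split(line, "\t")
	if len(col) != len(properties) {
		fmt.Println("invalid number of columns, skipping line")
		return
	}

	// The first column becomes the "dataset" tag; the rest become integer fields.
	tags := map[string]string{"dataset": col[0]}
	fields := map[string]interface{}{}
	for i, key := range properties[1:] {
		value, err := strconv.ParseInt(col[i+1], 10, 64)
		if err != nil {
			fmt.Printf("error parsing %s %q: %v\n", key, col[i+1], err)
			return
		}
		fields[key] = value
	}

	fmt.Println(tags, fields)
	// map[dataset:zata] map[avail:10741741326336 used:8564135526400 usedds:90112 usedsnap:0]
}
```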
} @@ -144,8 +198,9 @@ func sysctl(metric string) ([]string, error) { func init() { inputs.Add("zfs", func() telegraf.Input { return &Zfs{ - sysctl: sysctl, - zpool: zpool, + sysctl: sysctl, + zpool: zpool, + zdataset: zdataset, } }) } diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 87f21f67245f4..816f82b6dbf5b 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs @@ -31,6 +32,18 @@ func mock_zpool_unavail() ([]string, error) { return zpool_output_unavail, nil } +// $ zfs list -Hp -o name,avail,used,usedsnap,usedds +var zdataset_output = []string{ + "zata 10741741326336 8564135526400 0 90112", + "zata/home 10741741326336 2498560 212992 2285568", + "zata/import 10741741326336 196608 81920 114688", + "zata/storage 10741741326336 8556084379648 3601138999296 4954945380352", +} + +func mock_zdataset() ([]string, error) { + return zdataset_output, nil +} + // sysctl -q kstat.zfs.misc.arcstats // sysctl -q kstat.zfs.misc.vdev_cache_stats @@ -126,6 +139,39 @@ func TestZfsPoolMetrics_unavail(t *testing.T) { acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags) } +func TestZfsDatasetMetrics(t *testing.T) { + var acc testutil.Accumulator + + z := &Zfs{ + KstatMetrics: []string{"vdev_cache_stats"}, + sysctl: mock_sysctl, + zdataset: mock_zdataset, + } + err := z.Gather(&acc) + require.NoError(t, err) + + require.False(t, acc.HasMeasurement("zfs_dataset")) + acc.Metrics = nil + + z = &Zfs{ + KstatMetrics: []string{"vdev_cache_stats"}, + DatasetMetrics: true, + sysctl: mock_sysctl, + zdataset: mock_zdataset, + } + err = z.Gather(&acc) + require.NoError(t, err) + + //one pool, all metrics + tags := map[string]string{ + "dataset": "zata", + } + + datasetMetrics := getZataDatasetMetrics() + + acc.AssertContainsTaggedFields(t, "zfs_dataset", datasetMetrics, tags) +} + func TestZfsGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator @@ -178,6 +224,15 @@ func getTemp2PoolMetrics() map[string]interface{} { } } +func getZataDatasetMetrics() map[string]interface{} { + return map[string]interface{}{ + "avail": int64(10741741326336), + "used": int64(8564135526400), + "usedsnap": int64(0), + "usedds": int64(90112), + } +} + func getKstatMetricsVdevOnly() map[string]interface{} { return map[string]interface{}{ "vdev_cache_stats_misses": int64(87789), diff --git a/plugins/inputs/zfs/zfs_linux.go b/plugins/inputs/zfs/zfs_linux.go index 276880d7dea97..ac3ca6ee81d23 100644 --- a/plugins/inputs/zfs/zfs_linux.go +++ b/plugins/inputs/zfs/zfs_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package zfs diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 133d1cafa53c9..b844759eaffd1 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package zfs import ( - "io/ioutil" "os" "testing" @@ -115,7 +115,7 @@ streams_resets 4 20989756 streams_noresets 4 503182328 bogus_streams 4 0 ` -const pool_ioContents = `11 3 0x00 1 80 2225326830828 32953476980628 +const poolIoContents = `11 3 0x00 1 80 2225326830828 32953476980628 nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt 1884160 6450688 22 978 272187126 2850519036 2263669418655 424226814 2850519036 2263669871823 0 0 ` @@ -142,7 +142,7 @@ erpt-set-failed 4 202 fmri-set-failed 4 303 payload-set-failed 4 404 ` -const dmu_txContents = `5 
1 0x01 11 528 34103260832 437683925071438 +const dmuTxContents = `5 1 0x01 11 528 34103260832 437683925071438 name type data dmu_tx_assigned 4 39321636 dmu_tx_delay 4 111 @@ -182,67 +182,6 @@ scatter_page_alloc_retry 4 99311 scatter_sg_table_retry 4 99221 ` -const dbufcachestatsContents = ` -15 1 0x01 11 2992 6257505590736 8516276189184 -name type data -size 4 242688 -size_max 4 338944 -max_bytes 4 62834368 -lowater_bytes 4 56550932 -hiwater_bytes 4 69117804 -total_evicts 4 0 -hash_collisions 4 0 -hash_elements 4 31 -hash_elements_max 4 32 -hash_chains 4 0 -hash_chain_max 4 0 -` - -const dnodestatsContents = ` -10 1 0x01 28 7616 6257498525011 8671911551753 -name type data -dnode_hold_dbuf_hold 4 0 -dnode_hold_dbuf_read 4 0 -dnode_hold_alloc_hits 4 1460 -dnode_hold_alloc_misses 4 0 -dnode_hold_alloc_interior 4 0 -dnode_hold_alloc_lock_retry 4 0 -dnode_hold_alloc_lock_misses 4 0 -dnode_hold_alloc_type_none 4 0 -dnode_hold_free_hits 4 2 -dnode_hold_free_misses 4 0 -dnode_hold_free_lock_misses 4 0 -dnode_hold_free_lock_retry 4 0 -dnode_hold_free_overflow 4 0 -dnode_hold_free_refcount 4 0 -dnode_hold_free_txg 4 0 -dnode_allocate 4 2 -dnode_reallocate 4 0 -dnode_buf_evict 4 6 -dnode_alloc_next_chunk 4 1 -dnode_alloc_race 4 0 -dnode_alloc_next_block 4 0 -dnode_move_invalid 4 0 -dnode_move_recheck1 4 0 -dnode_move_recheck2 4 0 -dnode_move_special 4 0 -dnode_move_handle 4 0 -dnode_move_rwlock 4 0 -dnode_move_active 4 0 -` - -const vdevmirrorcachestatsContents = ` -18 1 0x01 7 1904 6257505684227 9638257816287 -name type data -rotating_linear 4 0 -rotating_offset 4 0 -rotating_seek 4 0 -non_rotating_linear 4 0 -non_rotating_seek 4 0 -preferred_found 4 0 -preferred_not_found 4 43 -` - var testKstatPath = os.TempDir() + "/telegraf/proc/spl/kstat/zfs" func TestZfsPoolMetrics(t *testing.T) { @@ -252,10 +191,10 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(pool_ioContents), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) poolMetrics := getPoolMetrics() @@ -291,25 +230,25 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) + err = os.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) + err = os.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmu_txContents), 0644) + err = os.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) require.NoError(t, err) - err = 
ioutil.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) require.NoError(t, err) intMetrics := getKstatMetricsAll() @@ -332,7 +271,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) require.NoError(t, err) tags = map[string]string{ diff --git a/plugins/inputs/zfs/zfs_other.go b/plugins/inputs/zfs/zfs_other.go index 98de02be917dd..963afd3580ff8 100644 --- a/plugins/inputs/zfs/zfs_other.go +++ b/plugins/inputs/zfs/zfs_other.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package zfs diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index ea25b49a0fcca..a1abccc420ad9 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -24,7 +24,10 @@ import ( "log" "time" - zipkin "github.com/openzipkin/zipkin-go-opentracing" + otlog "github.com/opentracing/opentracing-go/log" + zipkinot "github.com/openzipkin-contrib/zipkin-go-opentracing" + "github.com/openzipkin/zipkin-go" + zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http" ) var ( @@ -35,8 +38,6 @@ var ( ZipkinServerHost string ) -const usage = `./stress_test_write -batch_size= -max_backlog= -batch_interval= -span_count -zipkin_host=` - func init() { flag.IntVar(&BatchSize, "batch_size", 10000, "") flag.IntVar(&MaxBackLog, "max_backlog", 100000, "") @@ -48,27 +49,30 @@ func init() { func main() { flag.Parse() var hostname = fmt.Sprintf("http://%s:9411/api/v1/spans", ZipkinServerHost) - collector, err := zipkin.NewHTTPCollector( + reporter := zipkinhttp.NewReporter( hostname, - zipkin.HTTPBatchSize(BatchSize), - zipkin.HTTPMaxBacklog(MaxBackLog), - zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) - defer collector.Close() + zipkinhttp.BatchSize(BatchSize), + zipkinhttp.MaxBacklog(MaxBackLog), + zipkinhttp.BatchInterval(time.Duration(BatchTimeInterval)*time.Second), + ) + defer reporter.Close() + + endpoint, err := zipkin.NewEndpoint("Trivial", "127.0.0.1:0") if err != nil { - log.Fatalf("Error initializing zipkin http collector: %v\n", err) + log.Fatalf("Error: %v\n", err) } - tracer, err := zipkin.NewTracer( - zipkin.NewRecorder(collector, false, "127.0.0.1:0", "Trivial")) - + nativeTracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint)) if err != nil { log.Fatalf("Error: %v\n", err) } + tracer := zipkinot.Wrap(nativeTracer) + log.Printf("Writing %d spans to zipkin server at %s\n", SpanCount, hostname) for i := 0; i < SpanCount; i++ { parent := tracer.StartSpan("Parent") - parent.LogEvent(fmt.Sprintf("Trace%d", i)) + parent.LogFields(otlog.Message(fmt.Sprintf("Trace%d", i))) parent.Finish() } log.Println("Done. 
Flushing remaining spans...") diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index dde89570b8969..09518103b22cc 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -24,15 +24,16 @@ Otherwise, the input file will be interpreted as json, and the output will be en package main import ( + "context" "encoding/json" "errors" "flag" "fmt" - "io/ioutil" "log" + "os" "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) var ( @@ -51,7 +52,7 @@ func init() { func main() { flag.Parse() - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { log.Fatalf("Error reading file: %v\n", err) } @@ -62,7 +63,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } case "thrift": @@ -70,7 +71,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } default: @@ -99,23 +100,21 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) { } zspans = append(zspans, spans...) - fmt.Println(spans) - buf := thrift.NewTMemoryBuffer() - transport := thrift.NewTBinaryProtocolTransport(buf) + transport := thrift.NewTBinaryProtocolConf(buf, nil) - if err = transport.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { + if err = transport.WriteListBegin(context.Background(), thrift.STRUCT, len(spans)); err != nil { return nil, fmt.Errorf("error in beginning thrift write: %v", err) } for _, span := range zspans { - err = span.Write(transport) + err = span.Write(context.Background(), transport) if err != nil { return nil, fmt.Errorf("error converting zipkin struct to thrift: %v", err) } } - if err = transport.WriteListEnd(); err != nil { + if err = transport.WriteListEnd(context.Background()); err != nil { return nil, fmt.Errorf("error finishing thrift write: %v", err) } @@ -129,8 +128,8 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { err = fmt.Errorf("error in ReadListBegin: %v", err) return nil, err @@ -139,14 +138,14 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { var spans []*zipkincore.Span for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { err = fmt.Errorf("Error reading into zipkin struct: %v", err) return nil, err } spans = append(spans, zs) } - err = transport.ReadListEnd() + err = transport.ReadListEnd(context.Background()) if err != nil { err = fmt.Errorf("error ending thrift read: %v", err) return nil, err diff --git a/plugins/inputs/zipkin/codec/codec.go b/plugins/inputs/zipkin/codec/codec.go index 167b8ec24f1a3..2754e13d969e7 100644 --- a/plugins/inputs/zipkin/codec/codec.go +++ b/plugins/inputs/zipkin/codec/codec.go @@ -3,8 
+3,8 @@ package codec import ( "time" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" ) //now is a mockable time for now diff --git a/plugins/inputs/zipkin/codec/codec_test.go b/plugins/inputs/zipkin/codec/codec_test.go index 3525f30c201d6..c3d2fa655dcc6 100644 --- a/plugins/inputs/zipkin/codec/codec_test.go +++ b/plugins/inputs/zipkin/codec/codec_test.go @@ -12,9 +12,6 @@ import ( ) func Test_MicroToTime(t *testing.T) { - type args struct { - micro int64 - } tests := []struct { name string micro int64 diff --git a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go index 1803486742301..4c054126fa95e 100644 --- a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go +++ b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // JSON decodes spans from bodies `POST`ed to the spans endpoint diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go new file mode 100644 index 0000000000000..be7b2034832d4 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,5 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +var GoUnusedProtection__ int diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go new file mode 100644 index 0000000000000..7c5b5825acaa6 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go @@ -0,0 +1,47 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) 
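Since the vendored generated code and its callers (see `thrift_serialize.go` above) now target Apache Thrift 0.14, every protocol call threads a `context.Context`. A condensed encode/decode round-trip using the vendored `zipkincore` package, assuming the import path added above; this is a sketch of the pattern, not the converter itself:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/apache/thrift/lib/go/thrift"

	"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore"
)

func main() {
	ctx := context.Background()

	span := zipkincore.NewSpan()
	span.TraceID = 1
	span.ID = 2
	span.Name = "example"

	// Encode a one-element span list into an in-memory transport.
	buf := thrift.NewTMemoryBuffer()
	out := thrift.NewTBinaryProtocolConf(buf, nil)
	if err := out.WriteListBegin(ctx, thrift.STRUCT, 1); err != nil {
		log.Fatal(err)
	}
	if err := span.Write(ctx, out); err != nil {
		log.Fatal(err)
	}
	if err := out.WriteListEnd(ctx); err != nil {
		log.Fatal(err)
	}

	// Decode the same bytes back out of the buffer.
	in := thrift.NewTBinaryProtocolConf(buf, nil)
	_, size, err := in.ReadListBegin(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < size; i++ {
		decoded := zipkincore.NewSpan()
		if err := decoded.Read(ctx, in); err != nil {
			log.Fatal(err)
		}
		fmt.Println(decoded.Name) // "example"
	}
	if err := in.ReadListEnd(ctx); err != nil {
		log.Fatal(err)
	}
}
```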
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const HTTP_HOST = "http.host" +const HTTP_METHOD = "http.method" +const HTTP_PATH = "http.path" +const HTTP_ROUTE = "http.route" +const HTTP_URL = "http.url" +const HTTP_STATUS_CODE = "http.status_code" +const HTTP_REQUEST_SIZE = "http.request.size" +const HTTP_RESPONSE_SIZE = "http.response.size" +const LOCAL_COMPONENT = "lc" +const ERROR = "error" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go new file mode 100644 index 0000000000000..258fd4d1a0afc --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go @@ -0,0 +1,1556 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +//A subset of thrift base types, except BYTES. +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil 
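+	// Note: the Scan/Value pair above makes AnnotationType satisfy
+	// database/sql.Scanner and database/sql/driver.Valuer, so the enum can
+	// round-trip through a SQL column as its underlying int64.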
+} + +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port or 0, if unknown. +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web". +// +// This is the primary parameter for trace lookup, so should be intuitive as +// possible, for example, matching names in service discovery. +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// However, it is also permissible to set service_name = "" (empty string). +// The difference in the latter usage is that the span will not be queryable +// by service name unless more information is added to the span with non-empty +// service name, e.g. an additional annotation from the server. +// +// Particularly clients may not have a reliable service name at ingest. One +// approach is to set service_name to "" at ingest, and later assign a +// better label based on binary annotations, such as user agent. +// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// Associates an event that explains latency with a timestamp. +// +// Unlike log statements, annotations are often codes: for example "sr". +// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or multiplying currentTimeMillis by 1000. +// - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry". +// - Host: The host that recorded the value, primarily for query by service name. 
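The generated structs share a uniform optional-field pattern: nil-able fields get an `IsSetX` predicate plus a package-level `X_DEFAULT`, `writeFieldN` emits the field only when it is set, and `Equals` compares field by field (byte slices via `bytes.Compare`). A small illustration with `Endpoint`, using hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore"
)

func main() {
	a := zipkincore.NewEndpoint()
	a.ServiceName = "zipkin-web"
	a.Ipv4 = (1 << 24) | (2 << 16) | (3 << 8) | 4 // 1.2.3.4 packed per the field docs

	fmt.Println(a.IsSetIpv6()) // false: ipv6 is nil, so writeField4 would skip it
	fmt.Println(a.GetIpv6())   // [] (nil slice, i.e. Endpoint_Ipv6_DEFAULT)

	b := zipkincore.NewEndpoint()
	b.ServiceName = "zipkin-web"
	b.Ipv4 = a.Ipv4
	fmt.Println(a.Equals(b)) // true: field-by-field comparison, nil-safe for ipv6
}
```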
+type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, 
oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of HTTP_PATH ("http.path") could the path +// to a resource in a RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.path" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key: Name used to lookup spans, such as "http.path" or "finagle.version". +// - Value: Serialized thrift bytes, in TBinaryProtocol format. +// +// For legacy reasons, byte order is big-endian. See THRIFT-3217. +// - AnnotationType: The thrift type of value, most often STRING. +// +// annotation_type shouldn't vary for the same key. +// - Host: The host that recorded value, allowing query by service name or address. +// +// There are two exceptions: when key is "ca" or "sa", this is the source or +// destination of an RPC. 
This exception allows zipkin to display network +// context of uninstrumented services, such as browsers or databases. +type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } 
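+	// Value holds raw thrift-encoded bytes (big-endian, per THRIFT-3217 as noted
+	// in the field docs), so the check above is byte-wise via bytes.Compare
+	// rather than a comparison of decoded values.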
+ if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// Spans are usually created by instrumentation in RPC clients or servers, but +// can also represent in-process activity. Annotations in spans are similar to +// log statements, and are sometimes created directly by application developers +// to indicate events of interest, such as a cache miss. +// +// The root span is where parent_id = Nil; it usually has the longest duration +// in the trace. +// +// Span identifiers are packed into i64s, but should be treated opaquely. +// String encoding is fixed-width lower-hex, to avoid signed interpretation. +// +// Attributes: +// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it. +// - Name: Span name in lowercase, rpc method for example. Conventionally, when the +// span name isn't known, name = "unknown". +// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely +// identified in storage by (trace_id, id). +// - ParentID: The parent's Span.id; absent if this the root span in a trace. +// - Annotations: Associates events that explain latency with a timestamp. Unlike log +// statements, annotations are often codes: for example SERVER_RECV("sr"). +// Annotations are sorted ascending by timestamp. +// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For +// example, a binary annotation key could be "http.path". +// - Debug: True is a request to store this span even if it overrides sampling policy. +// - Timestamp: Epoch microseconds of the start of this span, absent if this an incomplete +// span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// Timestamp is nullable for input only. Spans without a timestamp cannot be +// presented in a timeline: Span stores should not output spans missing a +// timestamp. +// +// There are two known edge-cases where this could be absent: both cases +// exist when a collector receives a span in parts and a binary annotation +// precedes a timestamp. This is possible when.. +// - The span is in-flight (ex not yet received a timestamp) +// - The span's start event was lost +// - Duration: Measurement in microseconds of the critical path, if known. Durations of +// less than one microsecond must be rounded up to 1 microsecond. +// +// This value should be set directly, as opposed to implicitly via annotation +// timestamps. Doing so encourages precision decoupled from problems of +// clocks, such as skew or NTP updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. 
Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. +type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == 
thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error 
{ + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { + return false + } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + 
if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} diff --git a/plugins/inputs/zipkin/codec/thrift/thrift.go b/plugins/inputs/zipkin/codec/thrift/thrift.go index 65a9e1488c2c4..c2c60a3395d2d 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift.go @@ -1,16 +1,16 @@ package thrift import ( + "context" "encoding/binary" "fmt" "net" "strconv" "time" - "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // UnmarshalThrift converts raw bytes in thrift format to a slice of spans @@ -20,8 +20,8 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { return nil, err } @@ -29,13 +29,13 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { spans := make([]*zipkincore.Span, size) for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { return nil, err } spans[i] = zs } - if err = transport.ReadListEnd(); err != nil { + if err = transport.ReadListEnd(context.Background()); err != nil { return nil, err } return spans, nil diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index 798fc269edf86..ea566e4bfd0c8 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -1,12 +1,12 @@ package thrift import ( - "io/ioutil" + "os" "testing" "github.com/google/go-cmp/cmp" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) func Test_endpointHost(t *testing.T) { @@ -193,7 +193,7 @@ func TestUnmarshalThrift(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dat, err := ioutil.ReadFile(tt.filename) + dat, err := os.ReadFile(tt.filename) if err != nil { t.Fatalf("Could not find file %s\n", tt.filename) } diff --git a/plugins/inputs/zipkin/handler.go b/plugins/inputs/zipkin/handler.go index 24e7ac12f01be..83288bd6e4b2e 100644 --- a/plugins/inputs/zipkin/handler.go +++ b/plugins/inputs/zipkin/handler.go @@ -3,7 +3,7 @@ package zipkin import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "mime" "net/http" "strings" @@ -88,7 +88,7 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnsupportedMediaType) } - octets, err := ioutil.ReadAll(body) + octets, err := io.ReadAll(body) if err != nil { s.recorder.Error(err) w.WriteHeader(http.StatusInternalServerError) diff --git a/plugins/inputs/zipkin/handler_test.go b/plugins/inputs/zipkin/handler_test.go index b0176a22ead3c..f6e8bece80240 100644 --- a/plugins/inputs/zipkin/handler_test.go +++ 
b/plugins/inputs/zipkin/handler_test.go @@ -2,9 +2,10 @@ package zipkin import ( "bytes" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "os" "strconv" "testing" "time" @@ -28,7 +29,7 @@ func (m *MockRecorder) Error(err error) { } func TestSpanHandler(t *testing.T) { - dat, err := ioutil.ReadFile("testdata/threespans.dat") + dat, err := os.ReadFile("testdata/threespans.dat") if err != nil { t.Fatalf("Could not find file %s\n", "testdata/threespans.dat") } @@ -37,7 +38,7 @@ func TestSpanHandler(t *testing.T) { r := httptest.NewRequest( "POST", "http://server.local/api/v1/spans", - ioutil.NopCloser( + io.NopCloser( bytes.NewReader(dat))) r.Header.Set("Content-Type", "application/x-thrift") diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go index 4224fea3d2928..e679de5c47223 100644 --- a/plugins/inputs/zipkin/zipkin.go +++ b/plugins/inputs/zipkin/zipkin.go @@ -79,7 +79,7 @@ func (z Zipkin) SampleConfig() string { // Gather is empty for the zipkin plugin; all gathering is done through // the separate goroutine launched in (*Zipkin).Start() -func (z *Zipkin) Gather(acc telegraf.Accumulator) error { return nil } +func (z *Zipkin) Gather(_ telegraf.Accumulator) error { return nil } // Start launches a separate goroutine for collecting zipkin client http requests, // passing in a telegraf.Accumulator such that data can be collected. @@ -125,6 +125,8 @@ func (z *Zipkin) Stop() { defer z.waitGroup.Wait() defer cancel() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive z.server.Shutdown(ctx) } diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 77bef853b7e52..0c0bab279cc7f 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -3,8 +3,8 @@ package zipkin import ( "bytes" "fmt" - "io/ioutil" "net/http" + "os" "testing" "time" @@ -637,7 +637,7 @@ func TestZipkinPlugin(t *testing.T) { } func postThriftData(datafile, address, contentType string) error { - dat, err := ioutil.ReadFile(datafile) + dat, err := os.ReadFile(datafile) if err != nil { return fmt.Errorf("could not read from data file %s", datafile) } diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 5259e25b7163e..82a05bc1778b2 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -12,17 +12,17 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) -var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) +var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w[\w\.\-]*)\s+([\w\.\-]+)`) // Zookeeper is a zookeeper plugin type Zookeeper struct { Servers []string - Timeout internal.Duration + Timeout config.Duration EnableTLS bool `toml:"enable_tls"` EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls @@ -72,9 +72,8 @@ func (z *Zookeeper) dial(ctx context.Context, addr string) (net.Conn, error) { dialer.Deadline = deadline } return tls.DialWithDialer(&dialer, "tcp", addr, z.tlsConfig) - } else { - return dialer.DialContext(ctx, "tcp", addr) } + return dialer.DialContext(ctx, "tcp", addr) } // Gather reads stats from all configured servers accumulates stats @@ -90,11 +89,11 @@ func (z *Zookeeper) Gather(acc telegraf.Accumulator) error { z.initialized = true } - if 
z.Timeout.Duration < 1*time.Second { - z.Timeout.Duration = defaultTimeout + if z.Timeout < config.Duration(1*time.Second) { + z.Timeout = config.Duration(defaultTimeout) } - ctx, cancel := context.WithTimeout(ctx, z.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(z.Timeout)) defer cancel() if len(z.Servers) == 0 { @@ -108,7 +107,7 @@ func (z *Zookeeper) Gather(acc telegraf.Accumulator) error { } func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { - var zookeeper_state string + var zookeeperState string _, _, err := net.SplitHostPort(address) if err != nil { address = address + ":2181" @@ -123,22 +122,26 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr // Apply deadline to connection deadline, ok := ctx.Deadline() if ok { - c.SetDeadline(deadline) + if err := c.SetDeadline(deadline); err != nil { + return err + } } - fmt.Fprintf(c, "%s\n", "mntr") + if _, err := fmt.Fprintf(c, "%s\n", "mntr"); err != nil { + return err + } rdr := bufio.NewReader(c) scanner := bufio.NewScanner(rdr) service := strings.Split(address, ":") if len(service) != 2 { - return fmt.Errorf("Invalid service address: %s", address) + return fmt.Errorf("invalid service address: %s", address) } fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() - parts := zookeeperFormatRE.FindStringSubmatch(string(line)) + parts := zookeeperFormatRE.FindStringSubmatch(line) if len(parts) != 3 { return fmt.Errorf("unexpected line in mntr response: %q", line) @@ -146,9 +149,9 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr measurement := strings.TrimPrefix(parts[1], "zk_") if measurement == "server_state" { - zookeeper_state = parts[2] + zookeeperState = parts[2] } else { - sValue := string(parts[2]) + sValue := parts[2] iVal, err := strconv.ParseInt(sValue, 10, 64) if err == nil { @@ -167,7 +170,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr tags := map[string]string{ "server": srv, "port": service[1], - "state": zookeeper_state, + "state": zookeeperState, } acc.AddFields("zookeeper", fields, tags) diff --git a/plugins/inputs/zookeeper/zookeeper_test.go b/plugins/inputs/zookeeper/zookeeper_test.go index 37cabbada78fc..bbc2a37cb5cb4 100644 --- a/plugins/inputs/zookeeper/zookeeper_test.go +++ b/plugins/inputs/zookeeper/zookeeper_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestZookeeperGeneratesMetrics(t *testing.T) { +func TestZookeeperGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index a1ac7762156f5..16503960c98c9 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -1,42 +1,54 @@ package all import ( - _ "github.com/influxdata/telegraf/plugins/outputs/amon" - _ "github.com/influxdata/telegraf/plugins/outputs/amqp" - _ "github.com/influxdata/telegraf/plugins/outputs/application_insights" - _ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" - _ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" - _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" - _ "github.com/influxdata/telegraf/plugins/outputs/cratedb" + //Blank imports for plugins to register themselves + //_ "github.com/influxdata/telegraf/plugins/outputs/amon" + //_ "github.com/influxdata/telegraf/plugins/outputs/amqp" + //_ 
"github.com/influxdata/telegraf/plugins/outputs/application_insights" + //_ "github.com/influxdata/telegraf/plugins/outputs/azure_data_explorer" + //_ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" + //_ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" + //_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" + //_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch_logs" + //_ "github.com/influxdata/telegraf/plugins/outputs/cratedb" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" - _ "github.com/influxdata/telegraf/plugins/outputs/discard" - _ "github.com/influxdata/telegraf/plugins/outputs/dynatrace" - _ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" - _ "github.com/influxdata/telegraf/plugins/outputs/exec" - _ "github.com/influxdata/telegraf/plugins/outputs/execd" + //_ "github.com/influxdata/telegraf/plugins/outputs/discard" + //_ "github.com/influxdata/telegraf/plugins/outputs/dynatrace" + //_ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" + //_ "github.com/influxdata/telegraf/plugins/outputs/exec" + //_ "github.com/influxdata/telegraf/plugins/outputs/execd" _ "github.com/influxdata/telegraf/plugins/outputs/file" - _ "github.com/influxdata/telegraf/plugins/outputs/graphite" - _ "github.com/influxdata/telegraf/plugins/outputs/graylog" - _ "github.com/influxdata/telegraf/plugins/outputs/health" + //_ "github.com/influxdata/telegraf/plugins/outputs/graphite" + //_ "github.com/influxdata/telegraf/plugins/outputs/graylog" + //_ "github.com/influxdata/telegraf/plugins/outputs/health" _ "github.com/influxdata/telegraf/plugins/outputs/http" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" - _ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" - _ "github.com/influxdata/telegraf/plugins/outputs/instrumental" - _ "github.com/influxdata/telegraf/plugins/outputs/kafka" - _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" - _ "github.com/influxdata/telegraf/plugins/outputs/librato" - _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" - _ "github.com/influxdata/telegraf/plugins/outputs/nats" + //_ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + //_ "github.com/influxdata/telegraf/plugins/outputs/instrumental" + //_ "github.com/influxdata/telegraf/plugins/outputs/kafka" + //_ "github.com/influxdata/telegraf/plugins/outputs/kinesis" + //_ "github.com/influxdata/telegraf/plugins/outputs/librato" + //_ "github.com/influxdata/telegraf/plugins/outputs/logzio" + //_ "github.com/influxdata/telegraf/plugins/outputs/loki" + //_ "github.com/influxdata/telegraf/plugins/outputs/mqtt" + //_ "github.com/influxdata/telegraf/plugins/outputs/nats" _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" - _ "github.com/influxdata/telegraf/plugins/outputs/nsq" - _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" - _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" - _ "github.com/influxdata/telegraf/plugins/outputs/riemann" - _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" - _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" - _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" - _ "github.com/influxdata/telegraf/plugins/outputs/sumologic" - _ "github.com/influxdata/telegraf/plugins/outputs/syslog" - _ "github.com/influxdata/telegraf/plugins/outputs/warp10" - _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" + //_ "github.com/influxdata/telegraf/plugins/outputs/nsq" + //_ 
"github.com/influxdata/telegraf/plugins/outputs/opentelemetry" + //_ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" + //_ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" + //_ "github.com/influxdata/telegraf/plugins/outputs/riemann" + //_ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" + //_ "github.com/influxdata/telegraf/plugins/outputs/sensu" + //_ "github.com/influxdata/telegraf/plugins/outputs/signalfx" + //_ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" + //_ "github.com/influxdata/telegraf/plugins/outputs/sql" + //_ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" + //_ "github.com/influxdata/telegraf/plugins/outputs/sumologic" + //_ "github.com/influxdata/telegraf/plugins/outputs/syslog" + //_ "github.com/influxdata/telegraf/plugins/outputs/timestream" + //_ "github.com/influxdata/telegraf/plugins/outputs/warp10" + //_ "github.com/influxdata/telegraf/plugins/outputs/wavefront" + //_ "github.com/influxdata/telegraf/plugins/outputs/websocket" + //_ "github.com/influxdata/telegraf/plugins/outputs/yandex_cloud_monitoring" ) diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 10298173f66fb..5bbbba9814e38 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -4,19 +4,20 @@ import ( "bytes" "encoding/json" "fmt" - "log" "net/http" "strings" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) type Amon struct { - ServerKey string - AmonInstance string - Timeout internal.Duration + ServerKey string `toml:"server_key"` + AmonInstance string `toml:"amon_instance"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` client *http.Client } @@ -51,7 +52,7 @@ func (a *Amon) Connect() error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: a.Timeout.Duration, + Timeout: time.Duration(a.Timeout), } return nil } @@ -76,7 +77,7 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { metricCounter++ } } else { - log.Printf("I! unable to build Metric for %s, skipping\n", m.Name()) + a.Log.Infof("Unable to build Metric for %s, skipping", m.Name()) } } @@ -84,22 +85,22 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { - return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error()) + return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } - req, err := http.NewRequest("POST", a.authenticatedUrl(), bytes.NewBuffer(tsBytes)) + req, err := http.NewRequest("POST", a.authenticatedURL(), bytes.NewBuffer(tsBytes)) if err != nil { - return fmt.Errorf("unable to create http.Request, %s\n", err.Error()) + return fmt.Errorf("unable to create http.Request, %s", err.Error()) } req.Header.Add("Content-Type", "application/json") resp, err := a.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + return fmt.Errorf("error POSTing metrics, %s", err.Error()) } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 209 { - return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) + return fmt.Errorf("received bad status code, %d", resp.StatusCode) } return nil @@ -113,8 +114,7 @@ func (a *Amon) Description() string { return "Configuration for Amon Server to send metrics to." 
} -func (a *Amon) authenticatedUrl() string { - +func (a *Amon) authenticatedURL() string { return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey) } @@ -134,11 +134,11 @@ func buildMetrics(m telegraf.Metric) (map[string]Point, error) { func (p *Point) setValue(v interface{}) error { switch d := v.(type) { case int: - p[1] = float64(int(d)) + p[1] = float64(d) case int32: - p[1] = float64(int32(d)) + p[1] = float64(d) case int64: - p[1] = float64(int64(d)) + p[1] = float64(d) case float32: p[1] = float64(d) case float64: diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 0c7e04da7e14d..95da1f99b0f9f 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -2,12 +2,11 @@ package amqp import ( "bytes" - "fmt" - "log" "strings" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -30,7 +29,7 @@ func (a *externalAuth) Mechanism() string { } func (a *externalAuth) Response() string { - return fmt.Sprintf("\000") + return "\000" } type AMQP struct { @@ -52,9 +51,10 @@ type AMQP struct { RetentionPolicy string `toml:"retention_policy"` // deprecated in 1.7; use headers Precision string `toml:"precision"` // deprecated; has no effect Headers map[string]string `toml:"headers"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` UseBatchFormat bool `toml:"use_batch_format"` ContentEncoding string `toml:"content_encoding"` + Log telegraf.Logger `toml:"-"` tls.ClientConfig serializer serializers.Serializer @@ -267,7 +267,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { } if q.sentMessages >= q.MaxMessages && q.MaxMessages > 0 { - log.Printf("D! Output [amqp] sent MaxMessages; closing connection") + q.Log.Debug("Sent MaxMessages; closing connection") q.client.Close() q.client = nil } @@ -296,22 +296,22 @@ func (q *AMQP) publish(key string, body []byte) error { func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) { if q.UseBatchFormat { return q.serializer.SerializeBatch(metrics) - } else { - var buf bytes.Buffer - for _, metric := range metrics { - octets, err := q.serializer.Serialize(metric) - if err != nil { - log.Printf("D! 
[outputs.amqp] Could not serialize metric: %v", err) - continue - } - _, err = buf.Write(octets) - if err != nil { - return nil, err - } + } + + var buf bytes.Buffer + for _, metric := range metrics { + octets, err := q.serializer.Serialize(metric) + if err != nil { + q.Log.Debugf("Could not serialize metric: %v", err) + continue + } + _, err = buf.Write(octets) + if err != nil { + return nil, err } - body := buf.Bytes() - return body, nil } + body := buf.Bytes() + return body, nil } func (q *AMQP) makeClientConfig() (*ClientConfig, error) { @@ -320,7 +320,7 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) { exchangeType: q.ExchangeType, exchangePassive: q.ExchangePassive, encoding: q.ContentEncoding, - timeout: q.Timeout.Duration, + timeout: time.Duration(q.Timeout), } switch q.ExchangeDurability { @@ -398,7 +398,7 @@ func init() { AuthMethod: DefaultAuthMethod, Database: DefaultDatabase, RetentionPolicy: DefaultRetentionPolicy, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), connect: connect, } }) diff --git a/plugins/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go index 32a9145281e48..05c25ea515194 100644 --- a/plugins/outputs/amqp/amqp_test.go +++ b/plugins/outputs/amqp/amqp_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/streadway/amqp" "github.com/stretchr/testify/require" ) @@ -15,8 +15,6 @@ type MockClient struct { PublishCallCount int CloseCallCount int - - t *testing.T } func (c *MockClient) Publish(key string, body []byte) error { @@ -29,10 +27,6 @@ func (c *MockClient) Close() error { return c.CloseF() } -func MockConnect(config *ClientConfig) (Client, error) { - return &MockClient{}, nil -} - func NewMockClient() Client { return &MockClient{ PublishF: func(key string, body []byte) error { @@ -59,24 +53,24 @@ func TestConnect(t *testing.T) { AuthMethod: DefaultAuthMethod, Database: DefaultDatabase, RetentionPolicy: DefaultRetentionPolicy, - Timeout: internal.Duration{Duration: time.Second * 5}, - connect: func(config *ClientConfig) (Client, error) { + Timeout: config.Duration(time.Second * 5), + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config - require.Equal(t, []string{DefaultURL}, config.brokers) - require.Equal(t, "", config.exchange) - require.Equal(t, "topic", config.exchangeType) - require.Equal(t, false, config.exchangePassive) - require.Equal(t, true, config.exchangeDurable) - require.Equal(t, amqp.Table(nil), config.exchangeArguments) + cfg := output.config + require.Equal(t, []string{DefaultURL}, cfg.brokers) + require.Equal(t, "", cfg.exchange) + require.Equal(t, "topic", cfg.exchangeType) + require.Equal(t, false, cfg.exchangePassive) + require.Equal(t, true, cfg.exchangeDurable) + require.Equal(t, amqp.Table(nil), cfg.exchangeArguments) require.Equal(t, amqp.Table{ "database": DefaultDatabase, "retention_policy": DefaultRetentionPolicy, - }, config.headers) - require.Equal(t, amqp.Transient, config.deliveryMode) + }, cfg.headers) + require.Equal(t, amqp.Transient, cfg.deliveryMode) require.NoError(t, err) }, }, @@ -86,15 +80,15 @@ func TestConnect(t *testing.T) { Headers: map[string]string{ "foo": "bar", }, - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, 
output *AMQP, err error) { - config := output.config + cfg := output.config require.Equal(t, amqp.Table{ "foo": "bar", - }, config.headers) + }, cfg.headers) require.NoError(t, err) }, }, @@ -104,15 +98,15 @@ func TestConnect(t *testing.T) { ExchangeArguments: map[string]string{ "foo": "bar", }, - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config + cfg := output.config require.Equal(t, amqp.Table{ "foo": "bar", - }, config.exchangeArguments) + }, cfg.exchangeArguments) require.NoError(t, err) }, }, @@ -122,18 +116,18 @@ func TestConnect(t *testing.T) { URL: "amqp://foo:bar@localhost", Username: "telegraf", Password: "pa$$word", - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config + cfg := output.config require.Equal(t, []amqp.Authentication{ &amqp.PlainAuth{ Username: "telegraf", Password: "pa$$word", }, - }, config.auth) + }, cfg.auth) require.NoError(t, err) }, @@ -142,13 +136,13 @@ func TestConnect(t *testing.T) { name: "url support", output: &AMQP{ URL: DefaultURL, - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config - require.Equal(t, []string{DefaultURL}, config.brokers) + cfg := output.config + require.Equal(t, []string{DefaultURL}, cfg.brokers) require.NoError(t, err) }, }, diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index 34017a89f0bab..b23f1affef06f 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -20,7 +20,7 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur ## Context Tag Sources add Application Insights context tags to a tag value. 
## ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # [outputs.application_insights.context_tag_sources] # "ai.cloud.role" = "kubernetes_container_name" # "ai.cloud.roleInstance" = "kubernetes_pod_name" diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 3ab16af6fc313..54635ee7df6b1 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -2,15 +2,14 @@ package application_insights import ( "fmt" - "log" "math" "time" "unsafe" - "github.com/Microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/microsoft/ApplicationInsights-Go/appinsights" ) type TelemetryTransmitter interface { @@ -23,23 +22,18 @@ type DiagnosticsMessageSubscriber interface { } type ApplicationInsights struct { - InstrumentationKey string - EndpointURL string - Timeout internal.Duration - EnableDiagnosticLogging bool - ContextTagSources map[string]string - diagMsgSubscriber DiagnosticsMessageSubscriber - transmitter TelemetryTransmitter - diagMsgListener appinsights.DiagnosticsMessageListener + InstrumentationKey string `toml:"instrumentation_key"` + EndpointURL string `toml:"endpoint_url"` + Timeout config.Duration `toml:"timeout"` + EnableDiagnosticLogging bool `toml:"enable_diagnostic_logging"` + ContextTagSources map[string]string `toml:"context_tag_sources"` + Log telegraf.Logger `toml:"-"` + + diagMsgSubscriber DiagnosticsMessageSubscriber + transmitter TelemetryTransmitter + diagMsgListener appinsights.DiagnosticsMessageListener } -const ( - Error = "E! " - Warning = "W! " - Info = "I! " - Debug = "D! " -) - var ( sampleConfig = ` ## Instrumentation key of the Application Insights resource. @@ -57,7 +51,7 @@ var ( ## Context Tag Sources add Application Insights context tags to a tag value. 
## ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # [outputs.application_insights.context_tag_sources] # "ai.cloud.role" = "kubernetes_container_name" # "ai.cloud.roleInstance" = "kubernetes_pod_name" @@ -76,7 +70,7 @@ func (a *ApplicationInsights) Description() string { func (a *ApplicationInsights) Connect() error { if a.InstrumentationKey == "" { - return fmt.Errorf("Instrumentation key is required") + return fmt.Errorf("instrumentation key is required") } if a.transmitter == nil { @@ -85,7 +79,7 @@ func (a *ApplicationInsights) Connect() error { if a.EnableDiagnosticLogging && a.diagMsgSubscriber != nil { a.diagMsgListener = a.diagMsgSubscriber.Subscribe(func(msg string) error { - logOutputMsg(Info, "%s", msg) + a.Log.Info(msg) return nil }) } @@ -117,9 +111,9 @@ func (a *ApplicationInsights) Close() error { select { case <-a.transmitter.Close(): - logOutputMsg(Info, "Closed") - case <-time.After(a.Timeout.Duration): - logOutputMsg(Warning, "Close operation timed out after %v", a.Timeout.Duration) + a.Log.Info("Closed") + case <-time.After(time.Duration(a.Timeout)): + a.Log.Warnf("Close operation timed out after %v", time.Duration(a.Timeout)) } return nil @@ -139,15 +133,12 @@ func (a *ApplicationInsights) createTelemetry(metric telegraf.Metric) []appinsig telemetry := a.createSimpleMetricTelemetry(metric, "value", false) if telemetry != nil { return []appinsights.Telemetry{telemetry} - } else { - return nil } - } else { - // AppInsights does not support multi-dimensional metrics at the moment, so we need to disambiguate resulting telemetry - // by adding field name as the telemetry name suffix - retval := a.createTelemetryForUnusedFields(metric, nil) - return retval + return nil } + // AppInsights does not support multi-dimensional metrics at the moment, so we need to disambiguate resulting telemetry + // by adding field name as the telemetry name suffix + return a.createTelemetryForUnusedFields(metric, nil) } func (a *ApplicationInsights) createSimpleMetricTelemetry(metric telegraf.Metric, fieldName string, useFieldNameInTelemetryName bool) *appinsights.MetricTelemetry { @@ -231,8 +222,8 @@ func (a *ApplicationInsights) addContextTags(metric telegraf.Metric, telemetry a func getFloat64TelemetryPropertyValue( candidateFields []string, metric telegraf.Metric, - usedFields *[]string) (float64, error) { - + usedFields *[]string, +) (float64, error) { for _, fieldName := range candidateFields { fieldValue, found := metric.GetField(fieldName) if !found { @@ -251,14 +242,14 @@ func getFloat64TelemetryPropertyValue( return metricValue, nil } - return 0.0, fmt.Errorf("No field from the candidate list was found in the metric") + return 0.0, fmt.Errorf("no field from the candidate list was found in the metric") } func getIntTelemetryPropertyValue( candidateFields []string, metric telegraf.Metric, - usedFields *[]string) (int, error) { - + usedFields *[]string, +) (int, error) { for _, fieldName := range candidateFields { fieldValue, found := metric.GetField(fieldName) if !found { @@ -277,7 +268,7 @@ func getIntTelemetryPropertyValue( return metricValue, nil } - return 0, fmt.Errorf("No field from the candidate list was found in the metric") + return 0, fmt.Errorf("no field from the candidate list was found in the metric") } func contains(set []string, val string) bool { @@ -320,11 
+311,11 @@ func toInt(value interface{}) (int, error) { case uint64: if is32Bit { if v > math.MaxInt32 { - return 0, fmt.Errorf("Value [%d] out of range of 32-bit integers", v) + return 0, fmt.Errorf("value [%d] out of range of 32-bit integers", v) } } else { if v > math.MaxInt64 { - return 0, fmt.Errorf("Value [%d] out of range of 64-bit integers", v) + return 0, fmt.Errorf("value [%d] out of range of 64-bit integers", v) } } @@ -333,7 +324,7 @@ func toInt(value interface{}) (int, error) { case int64: if is32Bit { if v > math.MaxInt32 || v < math.MinInt32 { - return 0, fmt.Errorf("Value [%d] out of range of 32-bit integers", v) + return 0, fmt.Errorf("value [%d] out of range of 32-bit integers", v) } } @@ -343,14 +334,10 @@ func toInt(value interface{}) (int, error) { return 0.0, fmt.Errorf("[%s] cannot be converted to an int value", value) } -func logOutputMsg(level string, format string, v ...interface{}) { - log.Printf(level+"[outputs.application_insights] "+format, v...) -} - func init() { outputs.Add("application_insights", func() telegraf.Output { return &ApplicationInsights{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), diagMsgSubscriber: diagnosticsMessageSubscriber{}, // It is very common to set Cloud.RoleName and Cloud.RoleInstance context properties, hence initial capacity of two ContextTagSources: make(map[string]string, 2), diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 5a017823c02db..b685f6c318d05 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -5,10 +5,12 @@ import ( "testing" "time" - "github.com/Microsoft/ApplicationInsights-Go/appinsights" + "github.com/influxdata/telegraf/testutil" + + "github.com/microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/application_insights/mocks" "github.com/stretchr/testify/assert" @@ -24,7 +26,8 @@ func TestConnectFailsIfNoIkey(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, // Very long timeout to ensure we do not rely on timeouts for closing the transmitter - Timeout: internal.Duration{Duration: time.Hour}, + Timeout: config.Duration(time.Hour), + Log: testutil.Logger{}, } err := ai.Connect() @@ -39,7 +42,8 @@ func TestOutputCloseTimesOut(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, - Timeout: internal.Duration{Duration: time.Millisecond * 50}, + Timeout: config.Duration(time.Millisecond * 50), + Log: testutil.Logger{}, } err := ai.Close() @@ -63,10 +67,11 @@ func TestCloseRemovesDiagMsgListener(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, - Timeout: internal.Duration{Duration: time.Hour}, + Timeout: config.Duration(time.Hour), EnableDiagnosticLogging: true, diagMsgSubscriber: diagMsgSubscriber, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } err := ai.Connect() @@ -139,20 +144,20 @@ func TestAggregateMetricCreated(t *testing.T) { transmitter.On("Track", mock.Anything) metricName := "ShouldBeAggregateMetric" - m, err := metric.New( + m := metric.New( metricName, nil, // tags tt.fields, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: 
transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -197,20 +202,20 @@ func TestSimpleMetricCreated(t *testing.T) { transmitter.On("Track", mock.Anything) metricName := "ShouldBeSimpleMetric" - m, err := metric.New( + m := metric.New( metricName, nil, // tags tt.fields, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -267,20 +272,20 @@ func TestTagsAppliedToTelemetry(t *testing.T) { transmitter.On("Track", mock.Anything) metricName := "ShouldBeSimpleMetric" - m, err := metric.New( + m := metric.New( metricName, tt.tags, tt.fields, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -303,13 +308,12 @@ func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { transmitter := new(mocks.Transmitter) transmitter.On("Track", mock.Anything) - m, err := metric.New( + m := metric.New( "SimpleMetric", map[string]string{"kubernetes_container_name": "atcsvc", "kubernetes_pod_name": "bunkie17554"}, map[string]interface{}{"value": 23.0}, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -319,9 +323,10 @@ func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { "ai.cloud.roleInstance": "kubernetes_pod_name", "ai.user.id": "nonexistent", }, + Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -340,13 +345,12 @@ func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { transmitter := new(mocks.Transmitter) transmitter.On("Track", mock.Anything) - m, err := metric.New( + m := metric.New( "AggregateMetric", map[string]string{"kubernetes_container_name": "atcsvc", "kubernetes_pod_name": "bunkie17554"}, map[string]interface{}{"value": 23.0, "count": 5}, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -356,9 +360,10 @@ func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { "ai.cloud.roleInstance": "kubernetes_pod_name", "ai.user.id": "nonexistent", }, + Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -388,7 +393,6 @@ func verifyAggregateTelemetry( countField string, telemetry *appinsights.AggregateMetricTelemetry, ) { - verifyAggregateField := func(fieldName string, telemetryValue float64) { metricRawFieldValue, found := metric.Fields()[fieldName] if !found { @@ -417,7 +421,6 @@ func verifySimpleTelemetry( expectedTelemetryName string, telemetry *appinsights.MetricTelemetry, ) { - assert.Equal(expectedTelemetryName, telemetry.Name, "Telemetry name is not what was expected") assert.EqualValues(metric.Fields()[valueField], telemetry.Value, "Telemetry value does not match metric value field") assert.Equal(metric.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match") @@ -452,15 +455,6 @@ func findTransmittedTelemetry(transmitter *mocks.Transmitter, telemetryName stri return nil } -func keys(m map[string]string) []string { - keys := make([]string, 0, len(m)) - for k := range m { - keys 
= append(keys, k) - } - - return keys -} - func assertMapContains(assert *assert.Assertions, expected, actual map[string]string) { if expected == nil && actual == nil { return diff --git a/plugins/outputs/application_insights/diagnostic_message_subscriber.go b/plugins/outputs/application_insights/diagnostic_message_subscriber.go index 78993191096dc..a5b11671a1bf1 100644 --- a/plugins/outputs/application_insights/diagnostic_message_subscriber.go +++ b/plugins/outputs/application_insights/diagnostic_message_subscriber.go @@ -1,7 +1,7 @@ package application_insights import ( - "github.com/Microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights" ) type diagnosticsMessageSubscriber struct { diff --git a/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go b/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go index ba7007d4061d5..d360a29e5618b 100644 --- a/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go +++ b/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go @@ -1,7 +1,7 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. package mocks -import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" +import appinsights "github.com/microsoft/ApplicationInsights-Go/appinsights" import mock "github.com/stretchr/testify/mock" diff --git a/plugins/outputs/application_insights/mocks/transmitter.go b/plugins/outputs/application_insights/mocks/transmitter.go index 5cc56fbb1ee1f..6b26f84da2fc2 100644 --- a/plugins/outputs/application_insights/mocks/transmitter.go +++ b/plugins/outputs/application_insights/mocks/transmitter.go @@ -1,7 +1,7 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. 
package mocks
-import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights"
+import appinsights "github.com/microsoft/ApplicationInsights-Go/appinsights"
import mock "github.com/stretchr/testify/mock"
diff --git a/plugins/outputs/application_insights/transmitter.go b/plugins/outputs/application_insights/transmitter.go
index 024ea32809fb0..a16039ad1a24f 100644
--- a/plugins/outputs/application_insights/transmitter.go
+++ b/plugins/outputs/application_insights/transmitter.go
@@ -1,7 +1,7 @@
package application_insights
import (
-	"github.com/Microsoft/ApplicationInsights-Go/appinsights"
+	"github.com/microsoft/ApplicationInsights-Go/appinsights"
)
type Transmitter struct {
@@ -11,11 +11,11 @@ type Transmitter struct {
func NewTransmitter(ikey string, endpointURL string) *Transmitter {
	if len(endpointURL) == 0 {
		return &Transmitter{client: appinsights.NewTelemetryClient(ikey)}
-	} else {
-		telemetryConfig := appinsights.NewTelemetryConfiguration(ikey)
-		telemetryConfig.EndpointUrl = endpointURL
-		return &Transmitter{client: appinsights.NewTelemetryClientFromConfig(telemetryConfig)}
	}
+
+	telemetryConfig := appinsights.NewTelemetryConfiguration(ikey)
+	telemetryConfig.EndpointUrl = endpointURL
+	return &Transmitter{client: appinsights.NewTelemetryClientFromConfig(telemetryConfig)}
}
func (t *Transmitter) Track(telemetry appinsights.Telemetry) {
diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md
new file mode 100644
index 0000000000000..4ae5bf7139924
--- /dev/null
+++ b/plugins/outputs/azure_data_explorer/README.md
@@ -0,0 +1,195 @@
+# Azure Data Explorer output plugin
+
+This plugin writes data collected by any of the Telegraf input plugins to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/).
+Azure Data Explorer is a distributed, columnar store, purpose-built for any type of logs, metrics, and time series data.
+
+## Prerequisites
+
+- [Create Azure Data Explorer cluster and database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal)
+- A VM/compute instance or container to host Telegraf; it can be hosted locally, where the app/service to be monitored is deployed, or remotely on a dedicated monitoring compute/container.
+
+## Configuration
+
+```toml
+[[outputs.azure_data_explorer]]
+  ## The URI property of the Azure Data Explorer resource on Azure
+  ## ex: https://myadxresource.australiasoutheast.kusto.windows.net
+  # endpoint_url = ""
+
+  ## The Azure Data Explorer database that the metrics will be ingested into.
+  ## The plugin will NOT generate this database automatically; it's expected that this database already exists before ingestion.
+  ## ex: "exampledatabase"
+  # database = ""
+
+  ## Timeout for Azure Data Explorer operations
+  # timeout = "20s"
+
+  ## Type of metrics grouping used when pushing to Azure Data Explorer.
+  ## Default is "TablePerMetric" for one table per different metric.
+  ## For more information, please check the plugin README.
+  # metrics_grouping_type = "TablePerMetric"
+
+  ## Name of the single table to store all the metrics (only needed if metrics_grouping_type is "SingleTable").
+  # table_name = ""
+```
+
+## Metrics Grouping
+
+Metrics can be grouped in two ways before being sent to Azure Data Explorer. To specify which grouping type the plugin should use, set the `metrics_grouping_type` option in the config file. If no value is given, the metrics are grouped using `TablePerMetric` by default.
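+
+For example, to route every metric into one table (the `SingleTable` grouping described below), the plugin could be configured as in the following minimal sketch. The endpoint and database values are the examples already used in the sample configuration above; the table name `telegraf_metrics` is only an illustrative placeholder:
+
+```toml
+[[outputs.azure_data_explorer]]
+  endpoint_url = "https://myadxresource.australiasoutheast.kusto.windows.net"
+  database = "exampledatabase"
+  metrics_grouping_type = "SingleTable"
+  table_name = "telegraf_metrics"
+```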
+
+### TablePerMetric
+
+The plugin groups the metrics by metric name and sends each group to a separate Azure Data Explorer table. If the table doesn't exist, the plugin creates it; if the table already exists, the plugin tries to merge the Telegraf metric schema into the existing table. For more information about the merge process, check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command).
+
+The table name will match the `name` property of the metric. This means the name of the metric should comply with the Azure Data Explorer table naming constraints, in case you plan to add a prefix to the metric name.
+
+### SingleTable
+
+The plugin sends all received metrics to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` in the config file. If the table doesn't exist, the plugin creates it; if the table already exists, the plugin tries to merge the Telegraf metric schema into the existing table. For more information about the merge process, check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command).
+
+## Tables Schema
+
+The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command generated by the plugin would look like the following:
+```
+.create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime)
+```
+
+The corresponding table mapping would look like the following:
+```
+.create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'
+```
+
+**Note**: This plugin automatically creates the Azure Data Explorer tables and the corresponding table mappings using the commands above.
+
+## Authentication
+
+### Supported Authentication Methods
+
+This plugin supports several types of authentication. It checks for the existence of specific environment variables and chooses the corresponding method accordingly.
+
+These methods are:
+
+1. AAD Application Tokens (Service Principals with secrets or certificates).
+
+   For guidance on how to create and register an App in Azure Active Directory, check [this article](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application); for more information on Service Principals, check [this article](https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals).
+
+2. AAD User Tokens
+   - Allows Telegraf to authenticate like a user. This method is intended for development purposes only.
+
+3. Managed Service Identity (MSI) token
+   - If you are running Telegraf on an Azure VM or other Azure infrastructure, this is the preferred authentication method.
+
+[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
+
+Whichever method is used, the designated principal needs to be assigned the `Database User` role on the database level in Azure Data Explorer. This role allows the plugin to create the required tables and ingest data into them.
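+
+As a sketch, assuming an AAD application (service principal) is used, the role could be granted with a Kusto management command along the lines of the following; the database name matches the sample configuration, while the app ID and tenant ID are placeholders:
+
+```
+// Grant the Database User role to the Telegraf service principal (placeholder IDs)
+.add database ['exampledatabase'] users ('aadapp=<app-id>;<tenant-id>') 'Telegraf'
+```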
+
+### Configurations of the chosen Authentication Method
+
+The plugin authenticates using the first available of the following configurations. **It's important to understand that the assessment, and consequently the choice of authentication method, happens in the order listed below**:
+
+1. **Client Credentials**: Azure AD Application ID and Secret.
+
+   Set the following environment variables:
+
+   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+   - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+   - `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
+
+2. **Client Certificate**: Azure AD Application ID and X.509 Certificate.
+
+   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+   - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+   - `AZURE_CERTIFICATE_PATH`: Specifies the certificate path to use.
+   - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
+
+3. **Resource Owner Password**: Azure AD User and Password. This grant type is
+   *not recommended*; use device login instead if you need interactive login.
+
+   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+   - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+   - `AZURE_USERNAME`: Specifies the username to use.
+   - `AZURE_PASSWORD`: Specifies the password to use.
+
+4. **Azure Managed Service Identity**: Delegate credential management to the
+   platform. Requires that code is running in Azure, e.g. on a VM. All
+   configuration is handled by Azure. See [Azure Managed Service Identity][msi]
+   for more details. Only available when using the [Azure Resource Manager][arm].
+
+[msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview
+[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview
+
+## Querying data collected in Azure Data Explorer
+
+Below are examples of data transformations and queries that can be useful for gaining insights:
+
+1. **Data collected using SQL input plugin**
+
+   Sample SQL metrics data:
+
+   name | tags | timestamp | fields
+   -----|------|-----------|-------
+   sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149}
+   sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464}
+
+   Since the collected metrics object is of a complex type, "fields" and "tags" are stored as the dynamic data type. There are multiple ways to query this data:
+
+   - **Query JSON attributes directly**: Azure Data Explorer provides the ability to query JSON data in raw format without parsing it, so JSON attributes can be queried directly in the following way:
+     ```
+     Tablename
+     | where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7
+     ```
+     ```
+     Tablename
+     | distinct tostring(tags.database_name)
+     ```
+     **Note**: This approach could have a performance impact in the case of large volumes of data; use the update policy approach below for such cases.
+
+   - **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: Transform dynamic data type columns using an update policy. This is the recommended, performant way of querying over large volumes of data compared to querying directly over JSON attributes.
+
+     ```
+     // Function to transform data
+     .create-or-alter function Transform_TargetTableName() {
+       SourceTableName
+       | mv-apply fields on (extend key = tostring(bag_keys(fields)[0]))
+       | project fieldname=key, value=todouble(fields[key]), name, tags, timestamp
+     }
+
+     // Create destination table with above query's results schema (if it doesn't exist already)
+     .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0
+
+     // Apply update policy on destination table
+     .alter table TargetTableName policy update
+     @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]'
+     ```
+
+2. **Data collected using syslog input plugin**
+
+   Sample syslog data:
+
+   name | tags | timestamp | fields
+   -----|------|-----------|-------
+   syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1}
+   syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1}
+
+   There are multiple ways to flatten dynamic columns, using either the 'extend' operator or the 'bag_unpack' plugin. Either of them can be used in the update policy function 'Transform_TargetTableName()' mentioned above:
+
+   - Use the [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator - this is the recommended approach compared to 'bag_unpack' as it is faster and more robust. Even if the schema changes, it will not break queries or dashboards.
+     ```
+     Tablename
+     | extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid=tolong(fields.procid), severity_code=toint(fields.severity_code),
+     SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version=todouble(fields.version),
+     appname=tostring(tags.appname), facility=tostring(tags.facility), host=tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity)
+     | project-away fields, tags
+     ```
+   - Use the [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic type columns automatically. This method could lead to issues if the source schema changes, as it expands columns dynamically.
+     ```
+     Tablename
+     | evaluate bag_unpack(tags, columnsConflict='replace_source')
+     | evaluate bag_unpack(fields, columnsConflict='replace_source')
+     ```
+
diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go
new file mode 100644
index 0000000000000..6d411fd05c3b9
--- /dev/null
+++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go
@@ -0,0 +1,255 @@
+package azure_data_explorer
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-kusto-go/kusto"
+	"github.com/Azure/azure-kusto-go/kusto/ingest"
+	"github.com/Azure/azure-kusto-go/kusto/unsafe"
+	"github.com/Azure/go-autorest/autorest/azure/auth"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers"
+	"github.com/influxdata/telegraf/plugins/serializers/json"
+)
+
+type AzureDataExplorer struct {
+	Endpoint        string          `toml:"endpoint_url"`
+	Database        string          `toml:"database"`
+	Log             telegraf.Logger `toml:"-"`
+	Timeout         config.Duration `toml:"timeout"`
+	MetricsGrouping string          `toml:"metrics_grouping_type"`
+	TableName       string          `toml:"table_name"`
+	client          localClient
+	ingesters       map[string]localIngestor
+	serializer      serializers.Serializer
+	createIngestor  ingestorFactory
+}
+
+const (
+	tablePerMetric = "tablepermetric"
+	singleTable    = "singletable"
+)
+
+type localIngestor interface {
+	FromReader(ctx context.Context, reader io.Reader, options ...ingest.FileOption) (*ingest.Result, error)
+}
+
+type localClient interface {
+	Mgmt(ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error)
+}
+
+type ingestorFactory func(localClient, string, string) (localIngestor, error)
+
+const createTableCommand = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);`
+const createTableMappingCommand = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'`
+
+func (adx *AzureDataExplorer) Description() string {
+	return "Sends metrics to Azure Data Explorer"
+}
+
+func (adx *AzureDataExplorer) SampleConfig() string {
+	return `
+  ## Azure Data Explorer cluster endpoint
+  ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net"
+  endpoint_url = ""
+
+  ## The Azure Data Explorer database that the metrics will be ingested into.
+  ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion.
+  ## ex: "exampledatabase"
+  database = ""
+
+  ## Timeout for Azure Data Explorer operations
+  # timeout = "20s"
+
+  ## Type of metrics grouping used when pushing to Azure Data Explorer.
+  ## Default is "TablePerMetric" for one table per different metric.
+  ## For more information, please check the plugin README.
+  # metrics_grouping_type = "TablePerMetric"
+
+  ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
+ # table_name = "" + +` +} + +func (adx *AzureDataExplorer) Connect() error { + authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource(adx.Endpoint) + if err != nil { + return err + } + authorization := kusto.Authorization{ + Authorizer: authorizer, + } + client, err := kusto.New(adx.Endpoint, authorization) + + if err != nil { + return err + } + adx.client = client + adx.ingesters = make(map[string]localIngestor) + adx.createIngestor = createRealIngestor + + return nil +} + +func (adx *AzureDataExplorer) Close() error { + adx.client = nil + adx.ingesters = nil + + return nil +} + +func (adx *AzureDataExplorer) Write(metrics []telegraf.Metric) error { + if adx.MetricsGrouping == tablePerMetric { + return adx.writeTablePerMetric(metrics) + } + return adx.writeSingleTable(metrics) +} + +func (adx *AzureDataExplorer) writeTablePerMetric(metrics []telegraf.Metric) error { + tableMetricGroups := make(map[string][]byte) + // Group metrics by name and serialize them + for _, m := range metrics { + tableName := m.Name() + metricInBytes, err := adx.serializer.Serialize(m) + if err != nil { + return err + } + if existingBytes, ok := tableMetricGroups[tableName]; ok { + tableMetricGroups[tableName] = append(existingBytes, metricInBytes...) + } else { + tableMetricGroups[tableName] = metricInBytes + } + } + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Duration(adx.Timeout)) + defer cancel() + + // Push the metrics for each table + format := ingest.FileFormat(ingest.JSON) + for tableName, tableMetrics := range tableMetricGroups { + if err := adx.pushMetrics(ctx, format, tableName, tableMetrics); err != nil { + return err + } + } + + return nil +} + +func (adx *AzureDataExplorer) writeSingleTable(metrics []telegraf.Metric) error { + //serialise each metric in metrics - store in byte[] + metricsArray := make([]byte, 0) + for _, m := range metrics { + metricsInBytes, err := adx.serializer.Serialize(m) + if err != nil { + return err + } + metricsArray = append(metricsArray, metricsInBytes...) 
+ } + + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Duration(adx.Timeout)) + defer cancel() + + //push metrics to a single table + format := ingest.FileFormat(ingest.JSON) + err := adx.pushMetrics(ctx, format, adx.TableName, metricsArray) + return err +} + +func (adx *AzureDataExplorer) pushMetrics(ctx context.Context, format ingest.FileOption, tableName string, metricsArray []byte) error { + ingestor, err := adx.getIngestor(ctx, tableName) + if err != nil { + return err + } + + reader := bytes.NewReader(metricsArray) + mapping := ingest.IngestionMappingRef(fmt.Sprintf("%s_mapping", tableName), ingest.JSON) + if _, err := ingestor.FromReader(ctx, reader, format, mapping); err != nil { + adx.Log.Errorf("sending ingestion request to Azure Data Explorer for table %q failed: %v", tableName, err) + } + return nil +} + +func (adx *AzureDataExplorer) getIngestor(ctx context.Context, tableName string) (localIngestor, error) { + ingestor := adx.ingesters[tableName] + + if ingestor == nil { + if err := adx.createAzureDataExplorerTable(ctx, tableName); err != nil { + return nil, fmt.Errorf("creating table for %q failed: %v", tableName, err) + } + //create a new ingestor client for the table + tempIngestor, err := adx.createIngestor(adx.client, adx.Database, tableName) + if err != nil { + return nil, fmt.Errorf("creating ingestor for %q failed: %v", tableName, err) + } + adx.ingesters[tableName] = tempIngestor + ingestor = tempIngestor + } + return ingestor, nil +} + +func (adx *AzureDataExplorer) createAzureDataExplorerTable(ctx context.Context, tableName string) error { + createStmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).UnsafeAdd(fmt.Sprintf(createTableCommand, tableName)) + if _, err := adx.client.Mgmt(ctx, adx.Database, createStmt); err != nil { + return err + } + + createTableMappingstmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).UnsafeAdd(fmt.Sprintf(createTableMappingCommand, tableName, tableName)) + if _, err := adx.client.Mgmt(ctx, adx.Database, createTableMappingstmt); err != nil { + return err + } + + return nil +} + +func (adx *AzureDataExplorer) Init() error { + if adx.Endpoint == "" { + return errors.New("Endpoint configuration cannot be empty") + } + if adx.Database == "" { + return errors.New("Database configuration cannot be empty") + } + + adx.MetricsGrouping = strings.ToLower(adx.MetricsGrouping) + if adx.MetricsGrouping == singleTable && adx.TableName == "" { + return errors.New("Table name cannot be empty for SingleTable metrics grouping type") + } + if adx.MetricsGrouping == "" { + adx.MetricsGrouping = tablePerMetric + } + if !(adx.MetricsGrouping == singleTable || adx.MetricsGrouping == tablePerMetric) { + return errors.New("Metrics grouping type is not valid") + } + + serializer, err := json.NewSerializer(time.Second) + if err != nil { + return err + } + adx.serializer = serializer + return nil +} + +func init() { + outputs.Add("azure_data_explorer", func() telegraf.Output { + return &AzureDataExplorer{ + Timeout: config.Duration(20 * time.Second), + } + }) +} + +func createRealIngestor(client localClient, database string, tableName string) (localIngestor, error) { + ingestor, err := ingest.New(client.(*kusto.Client), database, tableName) + if ingestor != nil { + return ingestor, nil + } + return nil, err +} diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go new file 
mode 100644 index 0000000000000..f85d074cb1f6f --- /dev/null +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -0,0 +1,200 @@ +package azure_data_explorer + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "testing" + "time" + + "github.com/Azure/azure-kusto-go/kusto" + "github.com/Azure/azure-kusto-go/kusto/ingest" + "github.com/influxdata/telegraf" + telegrafJson "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const createTableCommandExpected = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);` +const createTableMappingCommandExpected = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'` + +func TestWrite(t *testing.T) { + testCases := []struct { + name string + inputMetric []telegraf.Metric + client *fakeClient + createIngestor ingestorFactory + metricsGrouping string + tableName string + expected map[string]interface{} + expectedWriteError string + }{ + { + name: "Valid metric", + inputMetric: testutil.MockMetrics(), + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + f.queries = append(f.queries, query.String()) + return &kusto.RowIterator{}, nil + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: tablePerMetric, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + }, + { + name: "Error in Mgmt", + inputMetric: testutil.MockMetrics(), + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + return nil, errors.New("Something went wrong") + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: tablePerMetric, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + expectedWriteError: "creating table for \"test1\" failed: Something went wrong", + }, + { + name: "SingleTable metric grouping type", + inputMetric: testutil.MockMetrics(), + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + f.queries = append(f.queries, query.String()) + return &kusto.RowIterator{}, nil + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: singleTable, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, 
time.UTC).UnixNano() / int64(time.Second)), + }, + }, + } + + for _, tC := range testCases { + t.Run(tC.name, func(t *testing.T) { + serializer, err := telegrafJson.NewSerializer(time.Second) + require.NoError(t, err) + + plugin := AzureDataExplorer{ + Endpoint: "someendpoint", + Database: "databasename", + Log: testutil.Logger{}, + MetricsGrouping: tC.metricsGrouping, + TableName: tC.tableName, + client: tC.client, + ingesters: map[string]localIngestor{}, + createIngestor: tC.createIngestor, + serializer: serializer, + } + + errorInWrite := plugin.Write(testutil.MockMetrics()) + + if tC.expectedWriteError != "" { + require.EqualError(t, errorInWrite, tC.expectedWriteError) + } else { + require.NoError(t, errorInWrite) + + expectedNameOfMetric := tC.expected["metricName"].(string) + expectedNameOfTable := expectedNameOfMetric + createdIngestor := plugin.ingesters[expectedNameOfMetric] + + if tC.metricsGrouping == singleTable { + expectedNameOfTable = tC.tableName + createdIngestor = plugin.ingesters[expectedNameOfTable] + } + + require.NotNil(t, createdIngestor) + createdFakeIngestor := createdIngestor.(*fakeIngestor) + require.Equal(t, expectedNameOfMetric, createdFakeIngestor.actualOutputMetric["name"]) + + expectedFields := tC.expected["fields"].(map[string]interface{}) + require.Equal(t, expectedFields, createdFakeIngestor.actualOutputMetric["fields"]) + + expectedTags := tC.expected["tags"].(map[string]interface{}) + require.Equal(t, expectedTags, createdFakeIngestor.actualOutputMetric["tags"]) + + expectedTime := tC.expected["timestamp"].(float64) + require.Equal(t, expectedTime, createdFakeIngestor.actualOutputMetric["timestamp"]) + + createTableString := fmt.Sprintf(createTableCommandExpected, expectedNameOfTable) + require.Equal(t, createTableString, tC.client.queries[0]) + + createTableMappingString := fmt.Sprintf(createTableMappingCommandExpected, expectedNameOfTable, expectedNameOfTable) + require.Equal(t, createTableMappingString, tC.client.queries[1]) + } + }) + } +} + +func TestInitBlankEndpoint(t *testing.T) { + plugin := AzureDataExplorer{ + Log: testutil.Logger{}, + client: &fakeClient{}, + ingesters: map[string]localIngestor{}, + createIngestor: createFakeIngestor, + } + + errorInit := plugin.Init() + require.Error(t, errorInit) + require.Equal(t, "Endpoint configuration cannot be empty", errorInit.Error()) +} + +type fakeClient struct { + queries []string + internalMgmt func(client *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) +} + +func (f *fakeClient) Mgmt(ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + return f.internalMgmt(f, ctx, db, query, options...) 
+}
+
+type fakeIngestor struct {
+	actualOutputMetric map[string]interface{}
+}
+
+func createFakeIngestor(client localClient, database string, tableName string) (localIngestor, error) {
+	return &fakeIngestor{}, nil
+}
+
+func (f *fakeIngestor) FromReader(ctx context.Context, reader io.Reader, options ...ingest.FileOption) (*ingest.Result, error) {
+	scanner := bufio.NewScanner(reader)
+	scanner.Scan()
+	firstLine := scanner.Text()
+	err := json.Unmarshal([]byte(firstLine), &f.actualOutputMetric)
+	if err != nil {
+		return nil, err
+	}
+	return &ingest.Result{}, nil
+}
diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md
index fbb49358665a5..9d835c1eb6f4b
--- a/plugins/outputs/azure_monitor/README.md
+++ b/plugins/outputs/azure_monitor/README.md
@@ -40,7 +40,7 @@ written as a dimension on each Azure Monitor metric.
   ## The Azure Resource ID against which metric will be logged, e.g.
   ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
   # resource_id = ""
- 
+
   ## Optionally, if in Azure US Government, China, or other sovereign
   ## cloud environment, set the appropriate REST endpoint for receiving
   ## metrics. (Note: region may be unused in this context)
@@ -54,15 +54,7 @@ written as a dimension on each Azure Monitor metric.
    [enable system-assigned managed identity][enable msi].
 2. Use a region that supports Azure Monitor Custom Metrics, For regions with
    Custom Metrics support, an endpoint will be available with
-   the format `https://<region>.monitoring.azure.com`. The following regions
-   are currently known to be supported:
-   - East US (eastus)
-   - West US 2 (westus2)
-   - South Central US (southcentralus)
-   - West Central US (westcentralus)
-   - North Europe (northeurope)
-   - West Europe (westeurope)
-   - Southeast Asia (southeastasia)
+   the format `https://<region>.monitoring.azure.com`.
 
 [resource provider]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services
 [enable msi]: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/qs-configure-portal-windows-vm
@@ -84,7 +76,7 @@ preferred authentication methods are different from the *order* in which each
 authentication is checked. Here are the preferred authentication methods:
 
 1. Managed Service Identity (MSI) token
-   - This is the prefered authentication method. Telegraf will automatically
+   - This is the preferred authentication method. Telegraf will automatically
      authenticate using this method when running on Azure VMs.
 2. AAD Application Tokens (Service Principals)
    - Primarily useful if Telegraf is writing metrics for other resources.
@@ -140,7 +132,7 @@ authenticate when running Telegraf on Azure VMs.
 Azure Monitor only accepts values with a numeric type. The plugin will drop
 fields with a string type by default. The plugin can set all string type fields
 as extra dimensions in the Azure Monitor custom metric by setting the
-configuration option `strings_as_dimensions` to `true`. 
+configuration option `strings_as_dimensions` to `true`.
 
 Keep in mind, Azure Monitor allows a maximum of 10 dimensions per metric.
The plugin will deterministically dropped any dimensions that exceed the 10 diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index f2b1db1dd6868..ca511a5211860 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -7,8 +7,7 @@ import ( "encoding/json" "fmt" "hash/fnv" - "io/ioutil" - "log" + "io" "net/http" "regexp" "strings" @@ -17,7 +16,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/selfstat" @@ -26,12 +25,13 @@ import ( // AzureMonitor allows publishing of metrics to the Azure Monitor custom metrics // service type AzureMonitor struct { - Timeout internal.Duration - NamespacePrefix string `toml:"namespace_prefix"` - StringsAsDimensions bool `toml:"strings_as_dimensions"` - Region string - ResourceID string `toml:"resource_id"` - EndpointUrl string `toml:"endpoint_url"` + Timeout config.Duration + NamespacePrefix string `toml:"namespace_prefix"` + StringsAsDimensions bool `toml:"strings_as_dimensions"` + Region string `toml:"region"` + ResourceID string `toml:"resource_id"` + EndpointURL string `toml:"endpoint_url"` + Log telegraf.Logger `toml:"-"` url string auth autorest.Authorizer @@ -62,14 +62,14 @@ func (m *virtualMachineMetadata) ResourceID() string { m.Compute.ResourceGroupName, m.Compute.VMScaleSetName, ) - } else { - return fmt.Sprintf( - resourceIDTemplate, - m.Compute.SubscriptionID, - m.Compute.ResourceGroupName, - m.Compute.Name, - ) } + + return fmt.Sprintf( + resourceIDTemplate, + m.Compute.SubscriptionID, + m.Compute.ResourceGroupName, + m.Compute.Name, + ) } type dimension struct { @@ -144,25 +144,21 @@ func (a *AzureMonitor) SampleConfig() string { func (a *AzureMonitor) Connect() error { a.cache = make(map[time.Time]map[uint64]*aggregate, 36) - if a.Timeout.Duration == 0 { - a.Timeout.Duration = defaultRequestTimeout + if a.Timeout == 0 { + a.Timeout = config.Duration(defaultRequestTimeout) } a.client = &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: a.Timeout.Duration, - } - - if a.NamespacePrefix == "" { - a.NamespacePrefix = defaultNamespacePrefix + Timeout: time.Duration(a.Timeout), } var err error var region string var resourceID string - var endpointUrl string + var endpointURL string if a.Region == "" || a.ResourceID == "" { // Pull region and resource identifier @@ -177,8 +173,8 @@ func (a *AzureMonitor) Connect() error { if a.ResourceID != "" { resourceID = a.ResourceID } - if a.EndpointUrl != "" { - endpointUrl = a.EndpointUrl + if a.EndpointURL != "" { + endpointURL = a.EndpointURL } if resourceID == "" { @@ -187,17 +183,17 @@ func (a *AzureMonitor) Connect() error { return fmt.Errorf("no region configured or available via VM instance metadata") } - if endpointUrl == "" { + if endpointURL == "" { a.url = fmt.Sprintf(urlTemplate, region, resourceID) } else { - a.url = fmt.Sprintf(urlOverrideTemplate, endpointUrl, resourceID) + a.url = fmt.Sprintf(urlOverrideTemplate, endpointURL, resourceID) } - log.Printf("D! 
Writing to Azure Monitor URL: %s", a.url) + a.Log.Debugf("Writing to Azure Monitor URL: %s", a.url) a.auth, err = auth.NewAuthorizerFromEnvironmentWithResource(defaultAuthResource) if err != nil { - return nil + return err } a.Reset() @@ -225,7 +221,7 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return "", "", err } @@ -283,14 +279,14 @@ func (a *AzureMonitor) Write(metrics []telegraf.Metric) error { if azm, ok := azmetrics[id]; !ok { amm, err := translate(m, a.NamespacePrefix) if err != nil { - log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + a.Log.Errorf("Could not create azure metric for %q; discarding point", m.Name()) continue } azmetrics[id] = amm } else { amm, err := translate(m, a.NamespacePrefix) if err != nil { - log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + a.Log.Errorf("Could not create azure metric for %q; discarding point", m.Name()) continue } @@ -360,7 +356,7 @@ func (a *AzureMonitor) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } @@ -603,7 +599,7 @@ func (a *AzureMonitor) Push() []telegraf.Metric { tags[tag.name] = tag.value } - m, err := metric.New(agg.name, + m := metric.New(agg.name, tags, map[string]interface{}{ "min": agg.min, @@ -614,10 +610,6 @@ func (a *AzureMonitor) Push() []telegraf.Metric { tbucket, ) - if err != nil { - log.Printf("E! [outputs.azure_monitor]: could not create metric for aggregation %q; discarding point", agg.name) - } - metrics = append(metrics, m) } } @@ -646,7 +638,8 @@ func (a *AzureMonitor) Reset() { func init() { outputs.Add("azure_monitor", func() telegraf.Output { return &AzureMonitor{ - timeFunc: time.Now, + timeFunc: time.Now, + NamespacePrefix: defaultNamespacePrefix, } }) } diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go index 6fb40805ecd3e..803b0441af207 100644 --- a/plugins/outputs/azure_monitor/azure_monitor_test.go +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -6,10 +6,12 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "os" "testing" "time" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -29,6 +31,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -52,6 +55,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -75,6 +79,7 @@ func TestAggregate(t *testing.T) { Region: "test", ResourceID: "/test", StringsAsDimensions: true, + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -116,6 +121,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, cache: make(map[time.Time]map[uint64]*aggregate, 36), }, metrics: []telegraf.Metric{ @@ -153,6 +159,7 @@ func 
TestAggregate(t *testing.T) {
 			plugin: &AzureMonitor{
 				Region:     "test",
 				ResourceID: "/test",
+				Log:        testutil.Logger{},
 				cache:      make(map[time.Time]map[uint64]*aggregate, 36),
 			},
 			metrics: []telegraf.Metric{
@@ -204,7 +211,11 @@ func TestAggregate(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			err := tt.plugin.Connect()
+			msiEndpoint, err := adal.GetMSIVMEndpoint()
+			require.NoError(t, err)
+
+			os.Setenv("MSI_ENDPOINT", msiEndpoint)
+			err = tt.plugin.Connect()
 			require.NoError(t, err)
 
 			// Reset globals
@@ -262,6 +273,7 @@ func TestWrite(t *testing.T) {
 			plugin: &AzureMonitor{
 				Region:     "test",
 				ResourceID: "/test",
+				Log:        testutil.Logger{},
 			},
 			metrics: []telegraf.Metric{
 				testutil.MustMetric(
@@ -282,6 +294,7 @@
 			plugin: &AzureMonitor{
 				Region:     "test",
 				ResourceID: "/test",
+				Log:        testutil.Logger{},
 			},
 			metrics: []telegraf.Metric{
 				testutil.MustMetric(
@@ -308,6 +321,7 @@
 			plugin: &AzureMonitor{
 				Region:     "test",
 				ResourceID: "/test",
+				Log:        testutil.Logger{},
 			},
 			metrics: []telegraf.Metric{
 				testutil.MustMetric(
diff --git a/plugins/outputs/bigquery/README.md b/plugins/outputs/bigquery/README.md
new file mode 100644
index 0000000000000..9515711d50a75
--- /dev/null
+++ b/plugins/outputs/bigquery/README.md
@@ -0,0 +1,54 @@
+# Google BigQuery Output Plugin
+
+This plugin writes to [Google Cloud BigQuery](https://cloud.google.com/bigquery) and requires [authentication](https://cloud.google.com/bigquery/docs/authentication)
+with Google Cloud using either a service account or user credentials.
+
+Be aware that this plugin accesses APIs that are [chargeable](https://cloud.google.com/bigquery/pricing) and might incur costs.
+
+### Configuration
+
+```toml
+[[outputs.bigquery]]
+  ## GCP Project
+  project = "erudite-bloom-151019"
+
+  ## The BigQuery dataset
+  dataset = "telegraf"
+
+  ## Timeout for BigQuery operations.
+  # timeout = "5s"
+
+  ## Character to replace hyphens on Metric name
+  # replace_hyphen_to = "_"
+```
+
+Requires `project` to specify where BigQuery entries will be persisted.
+
+Requires `dataset` to specify under which BigQuery dataset the corresponding metrics tables reside.
+
+Each metric should have a corresponding table in BigQuery.
+The schema of the table in BigQuery:
+* Should contain the field `timestamp`, which is the timestamp of a Telegraf metric
+* Should contain the metric's tags with the same names, with the column type set to string.
+* Should contain the metric's fields with the same names, with the column type matching the field type.
+
+A schema sketch is given at the end of this README.
+
+### Restrictions
+
+Avoid hyphens in BigQuery table names: the underlying SDK cannot handle streaming inserts into tables with hyphens.
+
+For metrics with hyphens, please use the [Rename Processor Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/rename).
+
+If a metric name contains hyphens, by default the hyphens are replaced with underscores (`_`).
+This can be altered using the `replace_hyphen_to` configuration property.
+
+Available data type options are:
+* integer
+* float or long
+* string
+* boolean
+
+All field naming restrictions that apply to BigQuery should apply to the measurements to be imported.
+
+Tables in BigQuery should be created beforehand; they are not created during persistence.
+
+Pay attention to the column `timestamp`, since it is reserved upfront and cannot change.
+If partitioning is required, make sure it is applied beforehand.
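+
+As an illustrative sketch only, a table for a hypothetical `cpu` metric with a `host` tag and two float fields could be created as follows; the `telegraf` dataset matches the sample configuration above, while the metric, tag, and field names are assumptions:
+
+```sql
+-- sketch only: a hypothetical `cpu` metric with a `host` tag and two float fields
+CREATE TABLE telegraf.cpu (
+  `timestamp` TIMESTAMP, -- reserved column expected by the plugin
+  host STRING,           -- tag columns use the STRING type
+  usage_user FLOAT64,    -- field columns match the field type
+  usage_system FLOAT64
+);
+```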
diff --git a/plugins/outputs/bigquery/bigquery.go b/plugins/outputs/bigquery/bigquery.go
new file mode 100644
index 0000000000000..41af19d38d88e
--- /dev/null
+++ b/plugins/outputs/bigquery/bigquery.go
@@ -0,0 +1,247 @@
+package bigquery
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/bigquery"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/option"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/outputs"
+)
+
+const timeStampFieldName = "timestamp"
+
+var defaultTimeout = config.Duration(5 * time.Second)
+
+const sampleConfig = `
+  ## Credentials File
+  credentials_file = "/path/to/service/account/key.json"
+
+  ## Google Cloud Platform Project
+  project = "my-gcp-project"
+
+  ## The BigQuery dataset
+  dataset = "telegraf"
+
+  ## Timeout for BigQuery operations.
+  # timeout = "5s"
+
+  ## Character to replace hyphens on Metric name
+  # replace_hyphen_to = "_"
+`
+
+type BigQuery struct {
+	CredentialsFile string `toml:"credentials_file"`
+	Project         string `toml:"project"`
+	Dataset         string `toml:"dataset"`
+
+	Timeout         config.Duration `toml:"timeout"`
+	ReplaceHyphenTo string          `toml:"replace_hyphen_to"`
+
+	Log telegraf.Logger `toml:"-"`
+
+	client *bigquery.Client
+
+	warnedOnHyphens map[string]bool
+}
+
+// SampleConfig returns the formatted sample configuration for the plugin.
+func (s *BigQuery) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns the human-readable function definition of the plugin.
+func (s *BigQuery) Description() string {
+	return "Configuration for Google Cloud BigQuery to send entries"
+}
+
+func (s *BigQuery) Connect() error {
+	if s.Project == "" {
+		return fmt.Errorf("Project is a required field for BigQuery output")
+	}
+
+	if s.Dataset == "" {
+		return fmt.Errorf("Dataset is a required field for BigQuery output")
+	}
+
+	// Initialize the hyphen-warning cache before any early return so that
+	// metricToTable never writes to a nil map.
+	s.warnedOnHyphens = make(map[string]bool)
+
+	if s.client == nil {
+		return s.setUpDefaultClient()
+	}
+
+	return nil
+}
+
+func (s *BigQuery) setUpDefaultClient() error {
+	var credentialsOption option.ClientOption
+
+	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(ctx, time.Duration(s.Timeout))
+	defer cancel()
+
+	if s.CredentialsFile != "" {
+		credentialsOption = option.WithCredentialsFile(s.CredentialsFile)
+	} else {
+		creds, err := google.FindDefaultCredentials(ctx)
+		if err != nil {
+			return fmt.Errorf(
+				"unable to find Google Cloud Platform Application Default Credentials: %v. "+
+					"Either set ADC or provide CredentialsFile config", err)
+		}
+		credentialsOption = option.WithCredentials(creds)
+	}
+
+	client, err := bigquery.NewClient(ctx, s.Project, credentialsOption)
+	s.client = client
+	return err
+}
+
+// Write the metrics to Google Cloud BigQuery.
+func (s *BigQuery) Write(metrics []telegraf.Metric) error { + groupedMetrics := s.groupByMetricName(metrics) + + var wg sync.WaitGroup + + for k, v := range groupedMetrics { + wg.Add(1) + go func(k string, v []bigquery.ValueSaver) { + defer wg.Done() + s.insertToTable(k, v) + }(k, v) + } + + wg.Wait() + + return nil +} + +func (s *BigQuery) groupByMetricName(metrics []telegraf.Metric) map[string][]bigquery.ValueSaver { + groupedMetrics := make(map[string][]bigquery.ValueSaver) + + for _, m := range metrics { + bqm := newValuesSaver(m) + groupedMetrics[m.Name()] = append(groupedMetrics[m.Name()], bqm) + } + + return groupedMetrics +} + +func newValuesSaver(m telegraf.Metric) *bigquery.ValuesSaver { + s := make(bigquery.Schema, 0) + r := make([]bigquery.Value, 0) + timeSchema := timeStampFieldSchema() + s = append(s, timeSchema) + r = append(r, m.Time()) + + s, r = tagsSchemaAndValues(m, s, r) + s, r = valuesSchemaAndValues(m, s, r) + + return &bigquery.ValuesSaver{ + Schema: s.Relax(), + Row: r, + } +} + +func timeStampFieldSchema() *bigquery.FieldSchema { + return &bigquery.FieldSchema{ + Name: timeStampFieldName, + Type: bigquery.TimestampFieldType, + } +} + +func tagsSchemaAndValues(m telegraf.Metric, s bigquery.Schema, r []bigquery.Value) ([]*bigquery.FieldSchema, []bigquery.Value) { + for _, t := range m.TagList() { + s = append(s, tagFieldSchema(t)) + r = append(r, t.Value) + } + + return s, r +} + +func tagFieldSchema(t *telegraf.Tag) *bigquery.FieldSchema { + return &bigquery.FieldSchema{ + Name: t.Key, + Type: bigquery.StringFieldType, + } +} + +func valuesSchemaAndValues(m telegraf.Metric, s bigquery.Schema, r []bigquery.Value) ([]*bigquery.FieldSchema, []bigquery.Value) { + for _, f := range m.FieldList() { + s = append(s, valuesSchema(f)) + r = append(r, f.Value) + } + + return s, r +} + +func valuesSchema(f *telegraf.Field) *bigquery.FieldSchema { + return &bigquery.FieldSchema{ + Name: f.Key, + Type: valueToBqType(f.Value), + } +} + +func valueToBqType(v interface{}) bigquery.FieldType { + switch reflect.ValueOf(v).Kind() { + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return bigquery.IntegerFieldType + case reflect.Float32, reflect.Float64: + return bigquery.FloatFieldType + case reflect.Bool: + return bigquery.BooleanFieldType + default: + return bigquery.StringFieldType + } +} + +func (s *BigQuery) insertToTable(metricName string, metrics []bigquery.ValueSaver) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Duration(s.Timeout)) + defer cancel() + + tableName := s.metricToTable(metricName) + table := s.client.DatasetInProject(s.Project, s.Dataset).Table(tableName) + inserter := table.Inserter() + + if err := inserter.Put(ctx, metrics); err != nil { + s.Log.Errorf("inserting metric %q failed: %v", metricName, err) + } +} + +func (s *BigQuery) metricToTable(metricName string) string { + if !strings.Contains(metricName, "-") { + return metricName + } + + dhm := strings.ReplaceAll(metricName, "-", s.ReplaceHyphenTo) + + if warned := s.warnedOnHyphens[metricName]; !warned { + s.Log.Warnf("Metric %q contains hyphens please consider using the rename processor plugin, falling back to %q", metricName, dhm) + s.warnedOnHyphens[metricName] = true + } + + return dhm +} + +// Close will terminate the session to the backend, returning error if an issue arises. 
+func (s *BigQuery) Close() error { + return s.client.Close() +} + +func init() { + outputs.Add("bigquery", func() telegraf.Output { + return &BigQuery{ + Timeout: defaultTimeout, + ReplaceHyphenTo: "_", + } + }) +} diff --git a/plugins/outputs/bigquery/bigquery_test.go b/plugins/outputs/bigquery/bigquery_test.go new file mode 100644 index 0000000000000..dd029f23f7ff6 --- /dev/null +++ b/plugins/outputs/bigquery/bigquery_test.go @@ -0,0 +1,165 @@ +package bigquery + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "cloud.google.com/go/bigquery" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/api/option" +) + +const ( + successfulResponse = "{\"kind\": \"bigquery#tableDataInsertAllResponse\"}" +) + +var testingHost string +var testDuration = config.Duration(5 * time.Second) +var receivedBody map[string]json.RawMessage + +type Row struct { + Tag1 string `json:"tag1"` + Timestamp string `json:"timestamp"` + Value float64 `json:"value"` +} + +func TestConnect(t *testing.T) { + srv := localBigQueryServer(t) + testingHost = strings.ReplaceAll(srv.URL, "http://", "") + defer srv.Close() + + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + } + + cerr := b.setUpTestClient() + require.NoError(t, cerr) + berr := b.Connect() + require.NoError(t, berr) +} + +func TestWrite(t *testing.T) { + srv := localBigQueryServer(t) + testingHost = strings.ReplaceAll(srv.URL, "http://", "") + defer srv.Close() + + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + } + + mockMetrics := testutil.MockMetrics() + + if err := b.setUpTestClient(); err != nil { + require.NoError(t, err) + } + if err := b.Connect(); err != nil { + require.NoError(t, err) + } + + if err := b.Write(mockMetrics); err != nil { + require.NoError(t, err) + } + + var rows []map[string]json.RawMessage + if err := json.Unmarshal(receivedBody["rows"], &rows); err != nil { + require.NoError(t, err) + } + + var row Row + if err := json.Unmarshal(rows[0]["json"], &row); err != nil { + require.NoError(t, err) + } + + pt, _ := time.Parse(time.RFC3339, row.Timestamp) + require.Equal(t, mockMetrics[0].Tags()["tag1"], row.Tag1) + require.Equal(t, mockMetrics[0].Time(), pt) + require.Equal(t, mockMetrics[0].Fields()["value"], row.Value) +} + +func TestMetricToTableDefault(t *testing.T) { + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + warnedOnHyphens: make(map[string]bool), + ReplaceHyphenTo: "_", + Log: testutil.Logger{}, + } + + otn := "table-with-hyphens" + ntn := b.metricToTable(otn) + + require.Equal(t, "table_with_hyphens", ntn) + require.True(t, b.warnedOnHyphens[otn]) +} + +func TestMetricToTableCustom(t *testing.T) { + log := testutil.Logger{} + + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + warnedOnHyphens: make(map[string]bool), + ReplaceHyphenTo: "*", + Log: log, + } + + otn := "table-with-hyphens" + ntn := b.metricToTable(otn) + + require.Equal(t, "table*with*hyphens", ntn) + require.True(t, b.warnedOnHyphens[otn]) +} + +func (b *BigQuery) setUpTestClient() error { + noAuth := option.WithoutAuthentication() + endpoints := option.WithEndpoint("http://" + testingHost) + + ctx := context.Background() + + c, err := bigquery.NewClient(ctx, b.Project, noAuth, endpoints) + + if err != nil { + return 
err + } + + b.client = c + + return nil +} + +func localBigQueryServer(t *testing.T) *httptest.Server { + srv := httptest.NewServer(http.NotFoundHandler()) + + srv.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/projects/test-project/datasets/test-dataset/tables/test1/insertAll": + decoder := json.NewDecoder(r.Body) + + if err := decoder.Decode(&receivedBody); err != nil { + require.NoError(t, err) + } + + w.WriteHeader(http.StatusOK) + if _, err := w.Write([]byte(successfulResponse)); err != nil { + require.NoError(t, err) + } + default: + w.WriteHeader(http.StatusNotFound) + } + }) + + return srv +} diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index 826a75e1c1c68..38f037dd13296 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -4,11 +4,12 @@ import ( "context" "encoding/base64" "fmt" - "log" "sync" + "time" "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" @@ -72,12 +73,14 @@ type PubSub struct { Topic string `toml:"topic"` Attributes map[string]string `toml:"attributes"` - SendBatched bool `toml:"send_batched"` - PublishCountThreshold int `toml:"publish_count_threshold"` - PublishByteThreshold int `toml:"publish_byte_threshold"` - PublishNumGoroutines int `toml:"publish_num_go_routines"` - PublishTimeout internal.Duration `toml:"publish_timeout"` - Base64Data bool `toml:"base64_data"` + SendBatched bool `toml:"send_batched"` + PublishCountThreshold int `toml:"publish_count_threshold"` + PublishByteThreshold int `toml:"publish_byte_threshold"` + PublishNumGoroutines int `toml:"publish_num_go_routines"` + PublishTimeout config.Duration `toml:"publish_timeout"` + Base64Data bool `toml:"base64_data"` + + Log telegraf.Logger `toml:"-"` t topic c *pubsub.Client @@ -111,9 +114,8 @@ func (ps *PubSub) Connect() error { if ps.stubTopic == nil { return ps.initPubSubClient() - } else { - return nil } + return nil } func (ps *PubSub) Close() error { @@ -190,7 +192,7 @@ func (ps *PubSub) publishSettings() pubsub.PublishSettings { settings.NumGoroutines = ps.PublishNumGoroutines } - if ps.PublishTimeout.Duration > 0 { + if time.Duration(ps.PublishTimeout) > 0 { settings.CountThreshold = 1 } @@ -230,7 +232,7 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro for i, m := range metrics { b, err := ps.serializer.Serialize(m) if err != nil { - log.Printf("D! 
[outputs.cloud_pubsub] Could not serialize metric: %v", err) + ps.Log.Debugf("Could not serialize metric: %v", err) continue } diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go index 6911ef139cb1e..967a33d742c3c 100644 --- a/plugins/outputs/cloud_pubsub/pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -14,7 +14,6 @@ import ( ) func TestPubSub_WriteSingle(t *testing.T) { - testMetrics := []testMetric{ {testutil.TestMetric("value_1", "test"), false /*return error */}, } @@ -126,7 +125,6 @@ func TestPubSub_WriteOverByteThreshold(t *testing.T) { } func TestPubSub_WriteBase64Single(t *testing.T) { - testMetrics := []testMetric{ {testutil.TestMetric("value_1", "test"), false /*return error */}, {testutil.TestMetric("value_2", "test"), false}, @@ -185,7 +183,7 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string if err != nil { t.Fatalf("Unable to decode expected base64-encoded message: %s", err) } - data = []byte(v) + data = v } parsed, err := p.Parse(data) diff --git a/plugins/outputs/cloud_pubsub/topic_gcp.go b/plugins/outputs/cloud_pubsub/topic_gcp.go index a85c6f39eb8f6..72ef50efcba79 100644 --- a/plugins/outputs/cloud_pubsub/topic_gcp.go +++ b/plugins/outputs/cloud_pubsub/topic_gcp.go @@ -6,8 +6,6 @@ import ( ) type ( - topicFactory func(string) (topic, error) - topic interface { ID() string Stop() diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go index e23a765366704..c66e573a60115 100644 --- a/plugins/outputs/cloud_pubsub/topic_stubbed.go +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -13,7 +13,7 @@ import ( "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" "google.golang.org/api/support/bundler" @@ -84,7 +84,7 @@ func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []te PublishCountThreshold: settings.CountThreshold, PublishByteThreshold: settings.ByteThreshold, PublishNumGoroutines: settings.NumGoroutines, - PublishTimeout: internal.Duration{Duration: settings.Timeout}, + PublishTimeout: config.Duration(settings.Timeout), } ps.SetSerializer(s) diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index 418fe86ffa489..56436c3c58d73 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -6,12 +6,16 @@ This plugin will send metrics to Amazon CloudWatch. This plugin uses a credential chain for Authentication with the CloudWatch API endpoint. In the following order the plugin will attempt to authenticate. -1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) -2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes -3. Shared profile from `profile` attribute -4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) -5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) -6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +1. Web identity provider credentials via STS if `role_arn` and `web_identity_token_file` are specified +2. 
Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) +3. Explicit credentials from `access_key`, `secret_key`, and `token` attributes +4. Shared profile from `profile` attribute +5. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +6. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) +7. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) + +If you are using credentials from a web identity provider, you can specify the session name using `role_session_name`. If +left empty, the current timestamp will be used. The IAM user needs only the `cloudwatch:PutMetricData` permission. diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 5e59ba2aaec1d..129f014bfb548 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -1,14 +1,15 @@ package cloudwatch import ( - "log" + "context" "math" "sort" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -16,20 +17,15 @@ import ( ) type CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` - Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace HighResolutionMetrics bool `toml:"high_resolution_metrics"` - svc *cloudwatch.CloudWatch + svc *cloudwatch.Client WriteStatistics bool `toml:"write_statistics"` + + Log telegraf.Logger `toml:"-"` + + internalaws.CredentialConfig } type statisticType int @@ -44,7 +40,7 @@ const ( type cloudwatchField interface { addValue(sType statisticType, value float64) - buildDatum() []*cloudwatch.MetricDatum + buildDatum() []types.MetricDatum } type statisticField struct { @@ -62,36 +58,34 @@ func (f *statisticField) addValue(sType statisticType, value float64) { } } -func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { - - var datums []*cloudwatch.MetricDatum +func (f *statisticField) buildDatum() []types.MetricDatum { + var datums []types.MetricDatum if f.hasAllFields() { // If we have all required fields, we build datum with StatisticValues - min, _ := f.values[statisticTypeMin] - max, _ := f.values[statisticTypeMax] - sum, _ := f.values[statisticTypeSum] - count, _ := f.values[statisticTypeCount] + min := f.values[statisticTypeMin] + max := f.values[statisticTypeMax] + sum := f.values[statisticTypeSum] + count := f.values[statisticTypeCount] - datum := &cloudwatch.MetricDatum{ + datum := types.MetricDatum{ MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), - StatisticValues: &cloudwatch.StatisticSet{ + StatisticValues: &types.StatisticSet{ Minimum: aws.Float64(min), Maximum: aws.Float64(max), Sum: aws.Float64(sum), SampleCount: aws.Float64(count), }, - StorageResolution: aws.Int64(f.storageResolution), + StorageResolution: 
aws.Int32(int32(f.storageResolution)), } datums = append(datums, datum) - } else { // If we don't have all required fields, we build each field as independent datum for sType, value := range f.values { - datum := &cloudwatch.MetricDatum{ + datum := types.MetricDatum{ Value: aws.Float64(value), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), @@ -119,7 +113,6 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { } func (f *statisticField) hasAllFields() bool { - _, hasMin := f.values[statisticTypeMin] _, hasMax := f.values[statisticTypeMax] _, hasSum := f.values[statisticTypeSum] @@ -143,15 +136,14 @@ func (f *valueField) addValue(sType statisticType, value float64) { } } -func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { - - return []*cloudwatch.MetricDatum{ +func (f *valueField) buildDatum() []types.MetricDatum { + return []types.MetricDatum{ { MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), Value: aws.Float64(f.value), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), - StorageResolution: aws.Int64(f.storageResolution), + StorageResolution: aws.Int32(int32(f.storageResolution)), }, } } @@ -162,16 +154,19 @@ var sampleConfig = ` ## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile #access_key = "" #secret_key = "" #token = "" #role_arn = "" + #web_identity_token_file = "" + #role_session_name = "" #profile = "" #shared_credential_file = "" @@ -184,12 +179,12 @@ var sampleConfig = ` ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" - ## If you have a large amount of metrics, you should consider to send statistic - ## values instead of raw metrics which could not only improve performance but - ## also save AWS API cost. If enable this flag, this plugin would parse the required - ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. - ## You could use basicstats aggregator to calculate those fields. If not all statistic - ## fields are available, all fields would still be sent as raw metrics. + ## If you have a large amount of metrics, you should consider to send statistic + ## values instead of raw metrics which could not only improve performance but + ## also save AWS API cost. If enable this flag, this plugin would parse the required + ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. + ## You could use basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields would still be sent as raw metrics. 
# write_statistics = false ## Enable high resolution metrics of 1 second (if not enabled, the standard resolution of 60 seconds is used) @@ -205,18 +200,12 @@ func (c *CloudWatch) Description() string { } func (c *CloudWatch) Connect() error { - credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, - EndpointURL: c.EndpointURL, + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err } - configProvider := credentialConfig.Credentials() - c.svc = cloudwatch.New(configProvider) + + c.svc = cloudwatch.NewFromConfig(cfg) return nil } @@ -225,8 +214,7 @@ func (c *CloudWatch) Close() error { } func (c *CloudWatch) Write(metrics []telegraf.Metric) error { - - var datums []*cloudwatch.MetricDatum + var datums []types.MetricDatum for _, m := range metrics { d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m) datums = append(datums, d...) @@ -244,16 +232,16 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error { return nil } -func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { +func (c *CloudWatch) WriteToCloudWatch(datums []types.MetricDatum) error { params := &cloudwatch.PutMetricDataInput{ MetricData: datums, Namespace: aws.String(c.Namespace), } - _, err := c.svc.PutMetricData(params) + _, err := c.svc.PutMetricData(context.Background(), params) if err != nil { - log.Printf("E! CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error()) + c.Log.Errorf("Unable to write to CloudWatch: %+v", err.Error()) } return err @@ -261,14 +249,13 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { // Partition the MetricDatums into smaller slices of a max size so that they are under the limit // for the AWS API calls. -func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum { - +func PartitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum { numberOfPartitions := len(datums) / size if len(datums)%size != 0 { - numberOfPartitions += 1 + numberOfPartitions++ } - partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions) + partitions := make([][]types.MetricDatum, numberOfPartitions) for i := 0; i < numberOfPartitions; i++ { start := size * i @@ -286,8 +273,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch // Make a MetricDatum from telegraf.Metric. It checks whether all required fields of // cloudwatch.StatisticSet are available. If so, it builds the MetricDatum from statistic values. // Otherwise, the fields are built independently. -func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum { - +func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []types.MetricDatum { fields := make(map[string]cloudwatchField) tags := point.Tags() storageResolution := int64(60) @@ -296,7 +282,6 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel } for k, v := range point.Fields() { - val, ok := convert(v) if !ok { // Only fields with values that can be converted to float64 (and within CloudWatch boundary) are supported. 
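For readers following the hunks above: PartitionDatums exists because PutMetricData accepts only a limited number of datums per request, so Write splits the slice into fixed-size chunks before sending. Below is a minimal, runnable sketch of that same chunking logic, with ints standing in for types.MetricDatum; the function and variable names are illustrative only, not taken from the plugin.

```go
package main

import "fmt"

// partition mirrors the chunking logic of PartitionDatums above, with ints
// standing in for types.MetricDatum (names here are illustrative only).
func partition(size int, datums []int) [][]int {
	numberOfPartitions := len(datums) / size
	if len(datums)%size != 0 {
		// One extra partition holds the remainder.
		numberOfPartitions++
	}

	partitions := make([][]int, numberOfPartitions)
	for i := 0; i < numberOfPartitions; i++ {
		start := size * i
		end := start + size
		if end > len(datums) {
			end = len(datums)
		}
		partitions[i] = datums[start:end]
	}
	return partitions
}

func main() {
	// Five datums with a batch size of two yield [[1 2] [3 4] [5]].
	fmt.Println(partition(2, []int{1, 2, 3, 4, 5}))
}
```

The new TestPartitionDatums cases further down exercise exactly these boundaries: an empty slice, a partial batch, a full batch, and a full batch plus remainder.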
@@ -338,7 +323,7 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel } } - var datums []*cloudwatch.MetricDatum + var datums []types.MetricDatum for _, f := range fields { d := f.buildDatum() datums = append(datums, d...) @@ -350,13 +335,13 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel // Make a list of Dimensions by using a Point's tags. CloudWatch supports up to // 10 dimensions per metric so we only keep up to the first 10 alphabetically. // This always includes the "host" tag if it exists. -func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { +func BuildDimensions(mTags map[string]string) []types.Dimension { const MaxDimensions = 10 - dimensions := make([]*cloudwatch.Dimension, 0, MaxDimensions) + dimensions := make([]types.Dimension, 0, MaxDimensions) // This is pretty ugly but we always want to include the "host" tag if it exists. if host, ok := mTags["host"]; ok { - dimensions = append(dimensions, &cloudwatch.Dimension{ + dimensions = append(dimensions, types.Dimension{ Name: aws.String("host"), Value: aws.String(host), }) @@ -380,7 +365,7 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { continue } - dimensions = append(dimensions, &cloudwatch.Dimension{ + dimensions = append(dimensions, types.Dimension{ Name: aws.String(k), Value: aws.String(mTags[k]), }) @@ -411,7 +396,6 @@ func getStatisticType(name string) (sType statisticType, fieldName string) { } func convert(v interface{}) (value float64, ok bool) { - ok = true switch t := v.(type) { diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index b2466e4d046d4..df98381cf3f90 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -2,14 +2,13 @@ package cloudwatch import ( "fmt" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "math" "sort" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - + "github.com/aws/aws-sdk-go-v2/aws" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" @@ -31,7 +30,7 @@ func TestBuildDimensions(t *testing.T) { i := 0 for k := range testPoint.Tags() { tagKeys[i] = k - i += 1 + i++ } sort.Strings(tagKeys) @@ -83,7 +82,7 @@ func TestBuildMetricDatums(t *testing.T) { assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) } - statisticMetric, _ := metric.New( + statisticMetric := metric.New( "test1", map[string]string{"tag1": "value1"}, map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)}, @@ -92,7 +91,7 @@ func TestBuildMetricDatums(t *testing.T) { datums := BuildMetricDatum(true, false, statisticMetric) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) - multiFieldsMetric, _ := metric.New( + multiFieldsMetric := metric.New( "test1", map[string]string{"tag1": "value1"}, map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)}, @@ -101,7 +100,7 @@ func TestBuildMetricDatums(t *testing.T) { datums = BuildMetricDatum(true, false, multiFieldsMetric) assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) - multiStatisticMetric, _ := metric.New( + multiStatisticMetric := metric.New( 
"test1", map[string]string{"tag1": "value1"}, map[string]interface{}{ @@ -117,8 +116,8 @@ func TestBuildMetricDatums(t *testing.T) { } func TestMetricDatumResolution(t *testing.T) { - const expectedStandardResolutionValue = int64(60) - const expectedHighResolutionValue = int64(1) + const expectedStandardResolutionValue = int32(60) + const expectedHighResolutionValue = int32(1) assert := assert.New(t) @@ -151,22 +150,21 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { } func TestPartitionDatums(t *testing.T) { - assert := assert.New(t) - testDatum := cloudwatch.MetricDatum{ + testDatum := types.MetricDatum{ MetricName: aws.String("Foo"), Value: aws.Float64(1), } - zeroDatum := []*cloudwatch.MetricDatum{} - oneDatum := []*cloudwatch.MetricDatum{&testDatum} - twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum} - threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum} + zeroDatum := []types.MetricDatum{} + oneDatum := []types.MetricDatum{testDatum} + twoDatum := []types.MetricDatum{testDatum, testDatum} + threeDatum := []types.MetricDatum{testDatum, testDatum, testDatum} - assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) + assert.Equal([][]types.MetricDatum{}, PartitionDatums(2, zeroDatum)) + assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]types.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) + assert.Equal([][]types.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) } diff --git a/plugins/outputs/cloudwatch_logs/README.md b/plugins/outputs/cloudwatch_logs/README.md new file mode 100644 index 0000000000000..ab745d877ff9c --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/README.md @@ -0,0 +1,82 @@ +## Amazon CloudWatch Logs Output for Telegraf + +This plugin will send logs to Amazon CloudWatch. + +## Amazon Authentication + +This plugin uses a credential chain for Authentication with the CloudWatch Logs +API endpoint. In the following order the plugin will attempt to authenticate. +1. Web identity provider credentials via STS if `role_arn` and `web_identity_token_file` are specified +2. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) +3. Explicit credentials from `access_key`, `secret_key`, and `token` attributes +4. Shared profile from `profile` attribute +5. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +6. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) +7. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) + +The IAM user needs the following permissions ( https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/permissions-reference-cwl.html): +- `logs:DescribeLogGroups` - required for check if configured log group exist +- `logs:DescribeLogStreams` - required to view all log streams associated with a log group. +- `logs:CreateLogStream` - required to create a new log stream in a log group.) 
+- `logs:PutLogEvents` - required to upload a batch of log events into a log stream. + +## Config +```toml +[[outputs.cloudwatch_logs]] + ## The region is the Amazon region that you wish to connect to. + ## Examples include but are not limited to: + ## - us-west-1 + ## - us-west-2 + ## - us-east-1 + ## - ap-southeast-1 + ## - ap-southeast-2 + ## ... + region = "us-east-1" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #web_identity_token_file = "" + #role_session_name = "" + #profile = "" + #shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront! + ## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place + log_group = "my-group-name" + + ## Log stream in log group + ## Either a log stream name or a reference to a metric attribute from which it can be parsed: + ## tag:<tag_name> or field:<field_name>. If the log stream does not exist, it will be created. + ## Since AWS does not automatically delete log streams whose log entries have expired (i.e. empty log streams), + ## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855) + log_stream = "tag:location" + + ## Source of log data - metric name + ## Specify the name of the metric from which the log data should be retrieved. + ## E.g., if you are using the docker_log plugin to stream logs from containers, then + ## specify log_data_metric_name = "docker_log" + log_data_metric_name = "docker_log" + + ## Specify from which metric attribute the log data should be retrieved: + ## tag:<tag_name> or field:<field_name>. 
+ ## I.e., if you are using docker_log plugin to stream logs from container, then + ## specify log_data_source = "field:message" + log_data_source = "field:message" +``` \ No newline at end of file diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go new file mode 100644 index 0000000000000..952fea4b2a9a4 --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -0,0 +1,428 @@ +package cloudwatch_logs + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type messageBatch struct { + logEvents []types.InputLogEvent + messageCount int +} +type logStreamContainer struct { + currentBatchSizeBytes int + currentBatchIndex int + messageBatches []messageBatch + sequenceToken string +} + +//Cloudwatch Logs service interface +type cloudWatchLogs interface { + DescribeLogGroups(context.Context, *cloudwatchlogs.DescribeLogGroupsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogStreams(context.Context, *cloudwatchlogs.DescribeLogStreamsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + CreateLogStream(context.Context, *cloudwatchlogs.CreateLogStreamInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(context.Context, *cloudwatchlogs.PutLogEventsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +// CloudWatchLogs plugin object definition +type CloudWatchLogs struct { + LogGroup string `toml:"log_group"` + lg *types.LogGroup //log group data + + LogStream string `toml:"log_stream"` + lsKey string //log stream source: tag or field + lsSource string //log stream source tag or field name + ls map[string]*logStreamContainer //log stream info + + LDMetricName string `toml:"log_data_metric_name"` + + LDSource string `toml:"log_data_source"` + logDatKey string //log data source (tag or field) + logDataSource string //log data source tag or field name + + svc cloudWatchLogs //cloudwatch logs service + + Log telegraf.Logger `toml:"-"` + + internalaws.CredentialConfig +} + +const ( + // Log events must comply with the following + // (https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/#CloudWatchLogs.PutLogEvents): + maxLogMessageLength = 262144 - awsOverheadPerLogMessageBytes //In bytes + maxBatchSizeBytes = 1048576 // The sum of all event messages in UTF-8, plus 26 bytes for each log event + awsOverheadPerLogMessageBytes = 26 + maxFutureLogEventTimeOffset = time.Hour * 2 // None of the log events in the batch can be more than 2 hours in the future. + + maxPastLogEventTimeOffset = time.Hour * 24 * 14 // None of the log events in the batch can be older than 14 days or older + // than the retention period of the log group. + + maxItemsInBatch = 10000 // The maximum number of log events in a batch is 10,000. + + //maxTimeSpanInBatch = time.Hour * 24 // A batch of log events in a single request cannot span more than 24 hours. + // Otherwise, the operation fails. +) + +var sampleConfig = ` +## The region is the Amazon region that you wish to connect to. 
+## Examples include but are not limited to: +## - us-west-1 +## - us-west-2 +## - us-east-1 +## - ap-southeast-1 +## - ap-southeast-2 +## ... +region = "us-east-1" + +## Amazon Credentials +## Credentials are loaded in the following order +## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +## 2) Assumed credentials via STS if role_arn is specified +## 3) explicit credentials from 'access_key' and 'secret_key' +## 4) shared profile from 'profile' +## 5) environment variables +## 6) shared credentials file +## 7) EC2 Instance Profile +#access_key = "" +#secret_key = "" +#token = "" +#role_arn = "" +#web_identity_token_file = "" +#role_session_name = "" +#profile = "" +#shared_credential_file = "" + +## Endpoint to make request against, the correct endpoint is automatically +## determined and this option should only be set if you wish to override the +## default. +## ex: endpoint_url = "http://localhost:8000" +# endpoint_url = "" + +## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront! +## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place +log_group = "my-group-name" + +## Log stream in log group +## Either a log stream name or a reference to a metric attribute from which it can be parsed: +## tag:<tag_name> or field:<field_name>. If the log stream does not exist, it will be created. +## Since AWS does not automatically delete log streams whose log entries have expired (i.e. empty log streams), +## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +log_stream = "tag:location" + +## Source of log data - metric name +## Specify the name of the metric from which the log data should be retrieved. +## E.g., if you are using the docker_log plugin to stream logs from containers, then +## specify log_data_metric_name = "docker_log" +log_data_metric_name = "docker_log" + +## Specify from which metric attribute the log data should be retrieved: +## tag:<tag_name> or field:<field_name>. +## E.g., if you are using the docker_log plugin to stream logs from containers, then +## specify log_data_source = "field:message" +log_data_source = "field:message" +` + +// SampleConfig returns the sample config for the plugin +func (c *CloudWatchLogs) SampleConfig() string { + return sampleConfig +} + +// Description returns a one-line description of the plugin +func (c *CloudWatchLogs) Description() string { + return "Configuration for AWS CloudWatchLogs output." 
+} + +// Init initializes the plugin and validates its configuration parameters +func (c *CloudWatchLogs) Init() error { + if c.LogGroup == "" { + return fmt.Errorf("log group is not set") + } + + if c.LogStream == "" { + return fmt.Errorf("log stream is not set") + } + + if c.LDMetricName == "" { + return fmt.Errorf("log data metrics name is not set") + } + + if c.LDSource == "" { + return fmt.Errorf("log data source is not set") + } + lsSplitArray := strings.Split(c.LDSource, ":") + if len(lsSplitArray) != 2 { + return fmt.Errorf("log data source is not properly formatted, ':' is missed.\n" + + "Should be 'tag:' or 'field:'") + } + + if lsSplitArray[0] != "tag" && lsSplitArray[0] != "field" { + return fmt.Errorf("log data source is not properly formatted.\n" + + "Should be 'tag:' or 'field:'") + } + + c.logDatKey = lsSplitArray[0] + c.logDataSource = lsSplitArray[1] + c.Log.Debugf("Log data: key '%s', source '%s'...", c.logDatKey, c.logDataSource) + + if c.lsSource == "" { + c.lsSource = c.LogStream + c.Log.Debugf("Log stream '%s'...", c.lsSource) + } + + return nil +} + +// Connect connects the plugin to the receiver of metrics +func (c *CloudWatchLogs) Connect() error { + var queryToken *string + var dummyToken = "dummy" + var logGroupsOutput = &cloudwatchlogs.DescribeLogGroupsOutput{NextToken: &dummyToken} + var err error + + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.svc = cloudwatchlogs.NewFromConfig(cfg) + + //Find log group with name 'c.LogGroup' + if c.lg == nil { //Only on the first connect (not a retry) + for logGroupsOutput.NextToken != nil { + logGroupsOutput, err = c.svc.DescribeLogGroups( + context.Background(), + &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: &c.LogGroup, + NextToken: queryToken}) + + if err != nil { + return err + } + queryToken = logGroupsOutput.NextToken + + for _, logGroup := range logGroupsOutput.LogGroups { + if *(logGroup.LogGroupName) == c.LogGroup { + c.Log.Debugf("Found log group %q", c.LogGroup) + c.lg = &logGroup //nolint:revive + } + } + } + + if c.lg == nil { + return fmt.Errorf("can't find log group %q", c.LogGroup) + } + + lsSplitArray := strings.Split(c.LogStream, ":") + if len(lsSplitArray) > 1 { + if lsSplitArray[0] == "tag" || lsSplitArray[0] == "field" { + c.lsKey = lsSplitArray[0] + c.lsSource = lsSplitArray[1] + c.Log.Debugf("Log stream: key %q, source %q...", c.lsKey, c.lsSource) + } + } + + if c.lsSource == "" { + c.lsSource = c.LogStream + c.Log.Debugf("Log stream %q...", c.lsSource) + } + + c.ls = map[string]*logStreamContainer{} + } + + return nil +} + +// Close closes the plugin's connection to the remote receiver +func (c *CloudWatchLogs) Close() error { + return nil +} + +// Write sends the metrics to the configured receiver +func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { + minTime := time.Now() + if c.lg.RetentionInDays != nil { + minTime = minTime.Add(-time.Hour * 24 * time.Duration(*c.lg.RetentionInDays)) + } else { + minTime = minTime.Add(-maxPastLogEventTimeOffset) + } + + maxTime := time.Now().Add(maxFutureLogEventTimeOffset) + + for _, m := range metrics { + //Filtering metrics + if m.Name() != c.LDMetricName { + continue + } + + if m.Time().After(maxTime) || m.Time().Before(minTime) { + c.Log.Debugf("Processing metric '%v': Metric is filtered based on TS!", m) + continue + } + + tags := m.Tags() + fields := m.Fields() + + logStream := "" + logData := "" + lsContainer := &logStreamContainer{ + currentBatchSizeBytes: 0, + currentBatchIndex: 0, 
messageBatches: []messageBatch{{}}} + + switch c.lsKey { + case "tag": + logStream = tags[c.lsSource] + case "field": + if fields[c.lsSource] != nil { + logStream = fields[c.lsSource].(string) + } + default: + logStream = c.lsSource + } + + if logStream == "" { + c.Log.Errorf("Processing metric '%v': log stream: key %q, source %q, not found!", m, c.lsKey, c.lsSource) + continue + } + + switch c.logDatKey { + case "tag": + logData = tags[c.logDataSource] + case "field": + if fields[c.logDataSource] != nil { + logData = fields[c.logDataSource].(string) + } + } + + if logData == "" { + c.Log.Errorf("Processing metric '%v': log data: key %q, source %q, not found!", m, c.logDatKey, c.logDataSource) + continue + } + + //Check if the message is too large to fit into a batch + if len(logData) > maxLogMessageLength { + metricStr := fmt.Sprintf("%v", m) + c.Log.Errorf("Processing metric '%s...', message is too large for the AWS max log message size of %d bytes!", metricStr[0:maxLogMessageLength/1000], maxLogMessageLength) + continue + } + //Batching log messages + //awsOverheadPerLogMessageBytes is the mandatory AWS overhead per log message + messageSizeInBytesForAWS := len(logData) + awsOverheadPerLogMessageBytes + + //Pick up an existing or prepare a new log stream container. + //A log stream container stores logs per log stream in + //an AWS CloudWatch Logs API friendly structure + if val, ok := c.ls[logStream]; ok { + lsContainer = val + } else { + lsContainer.messageBatches[0].messageCount = 0 + lsContainer.messageBatches[0].logEvents = []types.InputLogEvent{} + c.ls[logStream] = lsContainer + } + + if lsContainer.currentBatchSizeBytes+messageSizeInBytesForAWS > maxBatchSizeBytes || + lsContainer.messageBatches[lsContainer.currentBatchIndex].messageCount >= maxItemsInBatch { + //Need to start a new batch and reset counters + lsContainer.currentBatchIndex++ + lsContainer.messageBatches = append(lsContainer.messageBatches, + messageBatch{ + logEvents: []types.InputLogEvent{}, + messageCount: 0}) + lsContainer.currentBatchSizeBytes = messageSizeInBytesForAWS + } else { + lsContainer.currentBatchSizeBytes += messageSizeInBytesForAWS + lsContainer.messageBatches[lsContainer.currentBatchIndex].messageCount++ + } + + //AWS needs time in milliseconds. 
time.UnixNano() returns time in nanoseconds since epoch +we store the TS with nanosecond precision here in order to have proper ordering; later the TS will be reduced to milliseconds + metricTime := m.Time().UnixNano() + //Adding metric to batch + lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents = + append(lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents, + types.InputLogEvent{ + Message: &logData, + Timestamp: &metricTime}) + } + + // Sorting log events by TS and sending them to CloudWatch Logs + for logStream, elem := range c.ls { + for index, batch := range elem.messageBatches { + if len(batch.logEvents) == 0 { //can't push an empty batch + //c.Log.Warnf("Empty batch detected, skipping...") + continue + } + //Sorting + sort.Slice(batch.logEvents[:], func(i, j int) bool { + return *batch.logEvents[i].Timestamp < *batch.logEvents[j].Timestamp + }) + + putLogEvents := cloudwatchlogs.PutLogEventsInput{LogGroupName: &c.LogGroup, LogStreamName: &logStream} + if elem.sequenceToken == "" { + //This is the first attempt to write to this log stream; + //need to check log stream existence and create it if necessary + describeLogStreamOutput, err := c.svc.DescribeLogStreams(context.Background(), &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: &c.LogGroup, + LogStreamNamePrefix: &logStream}) + if err == nil && len(describeLogStreamOutput.LogStreams) == 0 { + _, err := c.svc.CreateLogStream(context.Background(), &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: &c.LogGroup, + LogStreamName: &logStream}) + if err != nil { + c.Log.Errorf("Can't create log stream %q in log group %q. Reason: %v.", logStream, c.LogGroup, err) + continue + } + putLogEvents.SequenceToken = nil + } else if err == nil && len(describeLogStreamOutput.LogStreams) == 1 { + putLogEvents.SequenceToken = describeLogStreamOutput.LogStreams[0].UploadSequenceToken + } else if err == nil && len(describeLogStreamOutput.LogStreams) > 1 { //Ambiguity + c.Log.Errorf("More than 1 log stream found with prefix %q in log group %q.", logStream, c.LogGroup) + continue + } else { + c.Log.Errorf("Error describing log streams in log group %q. Reason: %v", c.LogGroup, err) + continue + } + } else { + putLogEvents.SequenceToken = &elem.sequenceToken + } + + //Upload log events + //Adjusting TS to align with CloudWatch Logs requirements + for _, event := range batch.logEvents { + *event.Timestamp = *event.Timestamp / 1000000 + } + putLogEvents.LogEvents = batch.logEvents + + //There is a quota of 5 requests per second per log stream. Additional + //requests are throttled. This quota can't be changed. + putLogEventsOutput, err := c.svc.PutLogEvents(context.Background(), &putLogEvents) + if err != nil { + c.Log.Errorf("Can't push logs batch to AWS. 
Reason: %v", err) + continue + } + //Cleanup batch + elem.messageBatches[index] = messageBatch{ + logEvents: []types.InputLogEvent{}, + messageCount: 0} + + elem.sequenceToken = *putLogEventsOutput.NextSequenceToken + } + } + + return nil +} + +func init() { + outputs.Add("cloudwatch_logs", func() telegraf.Output { + return &CloudWatchLogs{} + }) +} diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go new file mode 100644 index 0000000000000..e103eb53d24e6 --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go @@ -0,0 +1,549 @@ +package cloudwatch_logs + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "testing" + "time" + + cloudwatchlogsV2 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +type mockCloudWatchLogs struct { + logStreamName string + pushedLogEvents []types.InputLogEvent +} + +func (c *mockCloudWatchLogs) Init(lsName string) { + c.logStreamName = lsName + c.pushedLogEvents = make([]types.InputLogEvent, 0) +} + +func (c *mockCloudWatchLogs) DescribeLogGroups(context.Context, *cloudwatchlogsV2.DescribeLogGroupsInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.DescribeLogGroupsOutput, error) { + return nil, nil +} + +func (c *mockCloudWatchLogs) DescribeLogStreams(context.Context, *cloudwatchlogsV2.DescribeLogStreamsInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.DescribeLogStreamsOutput, error) { + arn := "arn" + creationTime := time.Now().Unix() + sequenceToken := "arbitraryToken" + output := &cloudwatchlogsV2.DescribeLogStreamsOutput{ + LogStreams: []types.LogStream{ + { + Arn: &arn, + CreationTime: &creationTime, + FirstEventTimestamp: &creationTime, + LastEventTimestamp: &creationTime, + LastIngestionTime: &creationTime, + LogStreamName: &c.logStreamName, + UploadSequenceToken: &sequenceToken, + }}, + NextToken: &sequenceToken, + } + return output, nil +} +func (c *mockCloudWatchLogs) CreateLogStream(context.Context, *cloudwatchlogsV2.CreateLogStreamInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.CreateLogStreamOutput, error) { + return nil, nil +} +func (c *mockCloudWatchLogs) PutLogEvents(_ context.Context, input *cloudwatchlogsV2.PutLogEventsInput, _ ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.PutLogEventsOutput, error) { + sequenceToken := "arbitraryToken" + output := &cloudwatchlogsV2.PutLogEventsOutput{NextSequenceToken: &sequenceToken} + //Saving messages + for _, event := range input.LogEvents { + c.pushedLogEvents = append(c.pushedLogEvents, event) + } + + return output, nil +} + +//Ensure mockCloudWatchLogs implement cloudWatchLogs interface +var _ cloudWatchLogs = (*mockCloudWatchLogs)(nil) + +func RandStringBytes(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} +func TestInit(t *testing.T) { + tests := []struct { + name string + expectedErrorString string + plugin *CloudWatchLogs + }{ + { + name: "log group is not set", + expectedErrorString: "log group is not set", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: 
"eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log stream is not set", + expectedErrorString: "log stream is not set", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "TestLogGroup", + LogStream: "", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data metrics name is not set", + expectedErrorString: "log data metrics name is not set", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data source is not set", + expectedErrorString: "log data source is not set", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data source is not properly formatted (no divider)", + expectedErrorString: "log data source is not properly formatted, ':' is missed.\n" + + "Should be 'tag:' or 'field:'", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field_message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data source is not properly formatted (inappropriate fields)", + expectedErrorString: "log data source is not properly formatted.\n" + + "Should be 'tag:' or 'field:'", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "bla:bla", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "valid config", + plugin: &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "tag:location", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectedErrorString != "" { + require.EqualError(t, tt.plugin.Init(), tt.expectedErrorString) + } else { + require.Nil(t, tt.plugin.Init()) + } + }) + } +} + +func TestConnect(t *testing.T) { + //mock cloudwatch logs endpoint that is used only in plugin.Connect + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintln(w, + `{ + "logGroups": [ + { + "arn": "string", + "creationTime": 123456789, + "kmsKeyId": "string", + "logGroupName": "TestLogGroup", + "metricFilterCount": 1, + 
"retentionInDays": 10, + "storedBytes": 0 + } + ] + }`) + })) + defer ts.Close() + + plugin := &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + EndpointURL: ts.URL, + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + } + + require.Nil(t, plugin.Init()) + require.Nil(t, plugin.Connect()) +} + +func TestWrite(t *testing.T) { + //mock cloudwatch logs endpoint that is used only in plugin.Connect + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintln(w, + `{ + "logGroups": [ + { + "arn": "string", + "creationTime": 123456789, + "kmsKeyId": "string", + "logGroupName": "TestLogGroup", + "metricFilterCount": 1, + "retentionInDays": 1, + "storedBytes": 0 + } + ] + }`) + })) + defer ts.Close() + + plugin := &CloudWatchLogs{ + CredentialConfig: internalaws.CredentialConfig{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + EndpointURL: ts.URL, + }, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + } + require.Nil(t, plugin.Init()) + require.Nil(t, plugin.Connect()) + + tests := []struct { + name string + logStreamName string + metrics []telegraf.Metric + expectedMetricsOrder map[int]int //map[] + expectedMetricsCount int + }{ + { + name: "Sorted by timestamp log entries", + logStreamName: "deadbeef", + expectedMetricsOrder: map[int]int{0: 0, 1: 1}, + expectedMetricsCount: 2, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Sorted: message #1", + }, + time.Now().Add(-time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Sorted: message #2", + }, + time.Now(), + ), + }, + }, + { + name: "Unsorted log entries", + logStreamName: "deadbeef", + expectedMetricsOrder: map[int]int{0: 1, 1: 0}, + expectedMetricsCount: 2, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Unsorted: message #1", + }, + time.Now(), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Unsorted: message #2", + }, + time.Now().Add(-time.Minute), + ), + }, + }, + { + name: "Too old log entry & log entry in the future", + logStreamName: "deadbeef", + expectedMetricsCount: 0, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + 
"container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "message #1", + }, + time.Now().Add(-maxPastLogEventTimeOffset).Add(-time.Hour), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "message #2", + }, + time.Now().Add(maxFutureLogEventTimeOffset).Add(time.Hour), + ), + }, + }, + { + name: "Oversized log entry", + logStreamName: "deadbeef", + expectedMetricsCount: 0, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message + "message": RandStringBytes(maxLogMessageLength + 1), + }, + time.Now().Add(-time.Minute), + ), + }, + }, + { + name: "Batching log entries", + logStreamName: "deadbeef", + expectedMetricsOrder: map[int]int{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, + expectedMetricsCount: 5, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message1:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-4*time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message2:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-3*time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message3:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-2*time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message4:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "batch2 message1", + }, + time.Now(), + ), + }, + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + //Overwrite cloud watch log endpoint + mockCwl := &mockCloudWatchLogs{} + mockCwl.Init(tt.logStreamName) + plugin.svc = mockCwl + require.Nil(t, plugin.Write(tt.metrics)) + require.Equal(t, tt.expectedMetricsCount, len(mockCwl.pushedLogEvents)) + + for index, elem := range mockCwl.pushedLogEvents { + require.Equal(t, *elem.Message, tt.metrics[tt.expectedMetricsOrder[index]].Fields()["message"]) + require.Equal(t, *elem.Timestamp, tt.metrics[tt.expectedMetricsOrder[index]].Time().UnixNano()/1000000) + } + }) + } +} diff --git a/plugins/outputs/cratedb/README.md b/plugins/outputs/cratedb/README.md index a8a01fdfe99d7..11214092d26c2 100644 --- a/plugins/outputs/cratedb/README.md +++ b/plugins/outputs/cratedb/README.md @@ -26,8 +26,8 @@ config option, see below. ```toml # Configuration for CrateDB to send metrics to. [[outputs.cratedb]] - # A github.com/jackc/pgx connection string. - # See https://godoc.org/github.com/jackc/pgx#ParseDSN + # A github.com/jackc/pgx/v4 connection string. + # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig url = "postgres://user:password@localhost/schema?sslmode=disable" # Timeout for all CrateDB queries. timeout = "5s" @@ -35,4 +35,6 @@ config option, see below. table = "metrics" # If true, and the metrics table does not exist, create it automatically. table_create = true + # The character(s) to replace any '.' in an object key with + key_separator = "_" ``` diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index f6840cc38958b..b56787114d709 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -12,24 +12,25 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" - _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/v4/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit ) const MaxInt64 = int64(^uint64(0) >> 1) type CrateDB struct { - URL string - Timeout internal.Duration - Table string - TableCreate bool `toml:"table_create"` - DB *sql.DB + URL string + Timeout config.Duration + Table string + TableCreate bool `toml:"table_create"` + KeySeparator string `toml:"key_separator"` + DB *sql.DB } var sampleConfig = ` - # A github.com/jackc/pgx connection string. - # See https://godoc.org/github.com/jackc/pgx#ParseDSN + # A github.com/jackc/pgx/v4 connection string. + # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig url = "postgres://user:password@localhost/schema?sslmode=disable" # Timeout for all CrateDB queries. timeout = "5s" @@ -37,6 +38,8 @@ var sampleConfig = ` table = "metrics" # If true, and the metrics table does not exist, create it automatically. table_create = true + # The character(s) to replace any '.' 
in an object key with + key_separator = "_" ` func (c *CrateDB) Connect() error { @@ -55,7 +58,7 @@ CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( PRIMARY KEY ("timestamp", "hash_id","day") ) PARTITIONED BY("day"); ` - ctx, cancel := context.WithTimeout(context.Background(), c.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)) defer cancel() if _, err := db.ExecContext(ctx, sql); err != nil { return err @@ -66,20 +69,25 @@ CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( } func (c *CrateDB) Write(metrics []telegraf.Metric) error { - ctx, cancel := context.WithTimeout(context.Background(), c.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)) defer cancel() - if sql, err := insertSQL(c.Table, metrics); err != nil { + + generatedSQL, err := insertSQL(c.Table, c.KeySeparator, metrics) + if err != nil { return err - } else if _, err := c.DB.ExecContext(ctx, sql); err != nil { + } + + _, err = c.DB.ExecContext(ctx, generatedSQL) + if err != nil { return err } + return nil } -func insertSQL(table string, metrics []telegraf.Metric) (string, error) { +func insertSQL(table string, keyReplacement string, metrics []telegraf.Metric) (string, error) { rows := make([]string, len(metrics)) for i, m := range metrics { - cols := []interface{}{ hashID(m), m.Time().UTC(), @@ -90,7 +98,7 @@ func insertSQL(table string, metrics []telegraf.Metric) (string, error) { escapedCols := make([]string, len(cols)) for i, col := range cols { - escaped, err := escapeValue(col) + escaped, err := escapeValue(col, keyReplacement) if err != nil { return "", err } @@ -114,7 +122,7 @@ VALUES // inputs. // // [1] https://github.com/influxdata/telegraf/pull/3210#issuecomment-339273371 -func escapeValue(val interface{}) (string, error) { +func escapeValue(val interface{}, keyReplacement string) (string, error) { switch t := val.(type) { case string: return escapeString(t, `'`), nil @@ -126,18 +134,17 @@ func escapeValue(val interface{}) (string, error) { // possible value. if t <= uint64(MaxInt64) { return strconv.FormatInt(int64(t), 10), nil - } else { - return strconv.FormatInt(MaxInt64, 10), nil } + return strconv.FormatInt(MaxInt64, 10), nil case bool: return strconv.FormatBool(t), nil case time.Time: // see https://crate.io/docs/crate/reference/sql/data_types.html#timestamp - return escapeValue(t.Format("2006-01-02T15:04:05.999-0700")) + return escapeValue(t.Format("2006-01-02T15:04:05.999-0700"), keyReplacement) case map[string]string: - return escapeObject(convertMap(t)) + return escapeObject(convertMap(t), keyReplacement) case map[string]interface{}: - return escapeObject(t) + return escapeObject(t, keyReplacement) default: // This might be panic worthy under normal circumstances, but it's probably // better to not shut down the entire telegraf process because of one @@ -156,7 +163,7 @@ func convertMap(m map[string]string) map[string]interface{} { return c } -func escapeObject(m map[string]interface{}) (string, error) { +func escapeObject(m map[string]interface{}, keyReplacement string) (string, error) { // There is a decent chance that the implementation below doesn't catch all // edge cases, but it's hard to tell since the format seems to be a bit // underspecified. 
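The key_separator option introduced in this hunk exists because a '.' inside a CrateDB object key collides with object path syntax, so escapeObject rewrites the keys before quoting them. Below is a self-contained sketch of that rewriting, simplified to string-valued maps; the real code recurses through escapeValue and doubles embedded quotes via escapeString, which this version deliberately omits, and the function name is illustrative.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// escapeObjectSketch replaces '.' in each key with keySeparator before
// quoting, then emits CrateDB object literal syntax. Simplified: values
// are plain strings and no quote escaping is performed.
func escapeObjectSketch(m map[string]string, keySeparator string) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic key order, as in the real implementation

	pairs := make([]string, 0, len(m))
	for _, k := range keys {
		key := `"` + strings.ReplaceAll(k, ".", keySeparator) + `"`
		val := `'` + m[k] + `'`
		pairs = append(pairs, key+" = "+val)
	}
	return `{` + strings.Join(pairs, ", ") + `}`
}

func main() {
	// With key_separator = "_" this prints:
	// {"f_oo" = 'bar', "o_n_e" = 'more'}
	fmt.Println(escapeObjectSketch(map[string]string{"f.oo": "bar", "o.n.e": "more"}, "_"))
}
```

Note the ordering in the real code: the separator is substituted first and quote escaping runs afterwards, so a separator that itself contains a quote gets escaped too; the Test_circumeventingStringEscape case added further down checks exactly that, expecting `{"a_""b" = 'c'}`.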
@@ -173,12 +180,15 @@ func escapeObject(m map[string]interface{}) (string, error) { // Now we build our key = val pairs pairs := make([]string, 0, len(m)) for _, k := range keys { - // escape the value of our key k (potentially recursive) - val, err := escapeValue(m[k]) + key := escapeString(strings.ReplaceAll(k, ".", keyReplacement), `"`) + + // escape the value of the value at k (potentially recursive) + val, err := escapeValue(m[k], keyReplacement) if err != nil { return "", err } - pairs = append(pairs, escapeString(k, `"`)+" = "+val) + + pairs = append(pairs, key+" = "+val) } return `{` + strings.Join(pairs, ", ") + `}`, nil } @@ -235,7 +245,7 @@ func (c *CrateDB) Close() error { func init() { outputs.Add("cratedb", func() telegraf.Output { return &CrateDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 0cd93e8273810..0bdfd8d3e2652 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -8,23 +8,21 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestConnectAndWriteIntegration(t *testing.T) { + t.Skip("Skipping due to trust authentication failure") if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { t.Skip("Skipping test on CircleCI due to docker failures") } url := testURL() - table := "test" + table := "test-1" // dropSQL drops our table before each test. This simplifies changing the // schema during development :). @@ -38,7 +36,7 @@ func TestConnectAndWrite(t *testing.T) { c := &CrateDB{ URL: url, Table: table, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), TableCreate: true, } @@ -50,9 +48,9 @@ func TestConnectAndWrite(t *testing.T) { // the rows using their primary keys in order to take advantage of // read-after-write consistency in CrateDB. 
for _, m := range metrics { - hashIDVal, err := escapeValue(hashID(m)) + hashIDVal, err := escapeValue(hashID(m), "_") require.NoError(t, err) - timestamp, err := escapeValue(m.Time()) + timestamp, err := escapeValue(m.Time(), "_") require.NoError(t, err) var id int64 @@ -86,7 +84,7 @@ VALUES } for _, test := range tests { - if got, err := insertSQL("my_table", test.Metrics); err != nil { + if got, err := insertSQL("my_table", "_", test.Metrics); err != nil { t.Error(err) } else if got != test.Want { t.Errorf("got:\n%s\n\nwant:\n%s", got, test.Want) @@ -94,19 +92,13 @@ VALUES } } -func Test_escapeValue(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { - t.Skip("Skipping test on CircleCI due to docker failures") - } +type escapeValueTest struct { + Value interface{} + Want string +} - tests := []struct { - Val interface{} - Want string - }{ +func escapeValueTests() []escapeValueTest { + return []escapeValueTest{ // string {`foo`, `'foo'`}, {`foo'bar 'yeah`, `'foo''bar ''yeah'`}, @@ -125,6 +117,7 @@ func Test_escapeValue(t *testing.T) { {map[string]string(nil), `{}`}, {map[string]string{"foo": "bar"}, `{"foo" = 'bar'}`}, {map[string]string{"foo": "bar", "one": "more"}, `{"foo" = 'bar', "one" = 'more'}`}, + {map[string]string{"f.oo": "bar", "o.n.e": "more"}, `{"f_oo" = 'bar', "o_n_e" = 'more'}`}, // map[string]interface{} {map[string]interface{}{}, `{}`}, {map[string]interface{}(nil), `{}`}, @@ -133,28 +126,47 @@ func Test_escapeValue(t *testing.T) { {map[string]interface{}{"foo": map[string]interface{}{"one": "more"}}, `{"foo" = {"one" = 'more'}}`}, {map[string]interface{}{`fo"o`: `b'ar`, `ab'c`: `xy"z`, `on"""e`: `mo'''re`}, `{"ab'c" = 'xy"z', "fo""o" = 'b''ar', "on""""""e" = 'mo''''''re'}`}, } +} - url := testURL() - db, err := sql.Open("pgx", url) +func Test_escapeValueIntegration(t *testing.T) { + t.Skip("Skipping due to trust authentication failure") + + if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { + t.Skip("Skipping test on CircleCI due to docker failures") + } + + db, err := sql.Open("pgx", testURL()) require.NoError(t, err) defer db.Close() + tests := escapeValueTests() for _, test := range tests { - got, err := escapeValue(test.Val) - if err != nil { - t.Errorf("val: %#v: %s", test.Val, err) - } else if got != test.Want { - t.Errorf("got:\n%s\n\nwant:\n%s", got, test.Want) - } + got, err := escapeValue(test.Value, "_") + require.NoError(t, err, "value: %#v", test.Value) // This is a smoke test that will blow up if our escaping causing a SQL - // syntax error, which may allow for an attack. 
+ // syntax error, which may allow for an attack.= var reply interface{} row := db.QueryRow("SELECT " + got) require.NoError(t, row.Scan(&reply)) } } +func Test_escapeValue(t *testing.T) { + tests := escapeValueTests() + for _, test := range tests { + got, err := escapeValue(test.Value, "_") + require.NoError(t, err, "value: %#v", test.Value) + require.Equal(t, got, test.Want) + } +} + +func Test_circumeventingStringEscape(t *testing.T) { + value, err := escapeObject(map[string]interface{}{"a.b": "c"}, `_"`) + require.NoError(t, err) + require.Equal(t, value, `{"a_""b" = 'c'}`) +} + func Test_hashID(t *testing.T) { tests := []struct { Name string @@ -205,19 +217,19 @@ func Test_hashID(t *testing.T) { } for i, test := range tests { - m, err := metric.New( + m := metric.New( test.Name, test.Tags, test.Fields, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) - require.NoError(t, err) if got := hashID(m); got != test.Want { t.Errorf("test #%d: got=%d want=%d", i, got, test.Want) } } } +//nolint:unused // Used in skipped tests func testURL() string { url := os.Getenv("CRATE_URL") if url == "" { diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index ad1c7a02592e1..f9dd3fb0ef922 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -16,6 +16,9 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an ## Write URL override; useful for debugging. # url = "https://app.datadoghq.com/api/v1/series" + + ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) + # http_proxy_url = "http://localhost:8888" ``` ### Metrics diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 2d1a937883655..47d8a4e91a43b 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -4,23 +4,26 @@ import ( "bytes" "encoding/json" "fmt" - "log" "math" "net/http" "net/url" "strings" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/outputs" ) type Datadog struct { - Apikey string - Timeout internal.Duration + Apikey string `toml:"apikey"` + Timeout config.Duration `toml:"timeout"` + URL string `toml:"url"` + Log telegraf.Logger `toml:"-"` - URL string `toml:"url"` client *http.Client + proxy.HTTPProxy } var sampleConfig = ` @@ -32,6 +35,9 @@ var sampleConfig = ` ## Write URL override; useful for debugging. # url = "https://app.datadoghq.com/api/v1/series" + + ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) + # http_proxy_url = "http://localhost:8888" ` type TimeSeries struct { @@ -47,18 +53,23 @@ type Metric struct { type Point [2]float64 -const datadog_api = "https://app.datadoghq.com/api/v1/series" +const datadogAPI = "https://app.datadoghq.com/api/v1/series" func (d *Datadog) Connect() error { if d.Apikey == "" { return fmt.Errorf("apikey is a required field for datadog output") } + proxyFunc, err := d.Proxy() + if err != nil { + return err + } + d.client = &http.Client{ Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: proxyFunc, }, - Timeout: d.Timeout.Duration, + Timeout: time.Duration(d.Timeout), } return nil } @@ -96,7 +107,7 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { metricCounter++ } } else { - log.Printf("I! 
unable to build Metric for %s due to error '%v', skipping\n", m.Name(), err) + d.Log.Infof("Unable to build Metric for %s due to error '%v', skipping", m.Name(), err) } } @@ -104,27 +115,27 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { return nil } - redactedApiKey := "****************" + redactedAPIKey := "****************" ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { - return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error()) + return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } - req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes)) + req, err := http.NewRequest("POST", d.authenticatedURL(), bytes.NewBuffer(tsBytes)) if err != nil { - return fmt.Errorf("unable to create http.Request, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) + return fmt.Errorf("unable to create http.Request, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) } req.Header.Add("Content-Type", "application/json") resp, err := d.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) + return fmt.Errorf("error POSTing metrics, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 209 { - return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) + return fmt.Errorf("received bad status code, %d", resp.StatusCode) } return nil @@ -138,7 +149,7 @@ func (d *Datadog) Description() string { return "Configuration for DataDog API to send metrics to." } -func (d *Datadog) authenticatedUrl() string { +func (d *Datadog) authenticatedURL() string { q := url.Values{ "api_key": []string{d.Apikey}, } @@ -166,7 +177,7 @@ func buildTags(tagList []*telegraf.Tag) []string { index := 0 for _, tag := range tagList { tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value) - index += 1 + index++ } return tags } @@ -208,7 +219,7 @@ func (d *Datadog) Close() error { func init() { outputs.Add("datadog", func() telegraf.Output { return &Datadog{ - URL: datadog_api, + URL: datadogAPI, } }) } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index be8541ee8a92d..c893833b44398 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -17,8 +17,8 @@ import ( ) var ( - fakeUrl = "http://test.datadog.com" - fakeApiKey = "123456" + fakeURL = "http://test.datadog.com" + fakeAPIKey = "123456" ) func NewDatadog(url string) *Datadog { @@ -28,8 +28,8 @@ func NewDatadog(url string) *Datadog { } func fakeDatadog() *Datadog { - d := NewDatadog(fakeUrl) - d.Apikey = fakeApiKey + d := NewDatadog(fakeURL) + d.Apikey = fakeAPIKey return d } @@ -67,15 +67,15 @@ func TestBadStatusCode(t *testing.T) { if err == nil { t.Errorf("error expected but none returned") } else { - require.EqualError(t, fmt.Errorf("received bad status code, 500\n"), err.Error()) + require.EqualError(t, fmt.Errorf("received bad status code, 500"), err.Error()) } } func TestAuthenticatedUrl(t *testing.T) { d := fakeDatadog() - authUrl := d.authenticatedUrl() - assert.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeUrl, fakeApiKey), authUrl) + authURL := d.authenticatedURL() + assert.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeURL, fakeAPIKey), authURL) } func TestBuildTags(t *testing.T) { diff --git a/plugins/outputs/discard/discard.go 
b/plugins/outputs/discard/discard.go index 919f74b477ffa..de3696c3e6148 100644 --- a/plugins/outputs/discard/discard.go +++ b/plugins/outputs/discard/discard.go @@ -11,7 +11,7 @@ func (d *Discard) Connect() error { return nil } func (d *Discard) Close() error { return nil } func (d *Discard) SampleConfig() string { return "" } func (d *Discard) Description() string { return "Send metrics to nowhere at all" } -func (d *Discard) Write(metrics []telegraf.Metric) error { +func (d *Discard) Write(_ []telegraf.Metric) error { return nil } diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 78a7ee4990067..f25b8708942d6 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -1,27 +1,59 @@ # Dynatrace Output Plugin -This plugin is sending telegraf metrics to [Dynatrace](www.dynatrace.com). It has two operational modes. +This plugin sends Telegraf metrics to [Dynatrace](https://www.dynatrace.com) via the [Dynatrace Metrics API V2](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/). It may be run alongside the Dynatrace OneAgent for automatic authentication, or it may be run standalone on a host without a OneAgent by specifying a URL and API token. +More information on the plugin can be found in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). +All metrics are reported as gauges, unless they are specified to be delta counters using the `additional_counters` config option (see below). +See the [Dynatrace Metrics ingestion protocol documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol) for details on the types defined there. -Telegraf minimum version: Telegraf 1.16 -Plugin minimum tested version: 1.16 +## Requirements + +You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf, or a Dynatrace environment with version 1.202 or higher. + +- Telegraf minimum version: Telegraf 1.16 + +## Getting Started + +Setting up Telegraf is explained in the [Telegraf Documentation](https://docs.influxdata.com/telegraf/latest/introduction/getting-started/). +The Dynatrace exporter may be enabled by adding an `[[outputs.dynatrace]]` section to your `telegraf.conf` config file. +All configuration options are optional, but if a `url` other than the OneAgent metric ingestion endpoint is specified then an `api_token` is required. +To see all available options, see [Configuration](#configuration) below. + +### Running alongside Dynatrace OneAgent (preferred) + +If you run the Telegraf agent on a host or VM that is monitored by the Dynatrace OneAgent, you only need to enable the plugin; no further configuration is required. The Dynatrace Telegraf output plugin will send all metrics to the OneAgent, which will use its secure and load-balanced connection to send the metrics to your Dynatrace SaaS or Managed environment. +Depending on your environment, you might have to enable metrics ingestion on the OneAgent first as described in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). -## Running alongside Dynatrace OneAgent +Note: The name and identifier of the host running Telegraf will be added as a dimension to every metric. 
If this is undesirable, then the output plugin may be used in standalone mode using the directions below. -if you run the Telegraf agent on a host or VM that is monitored by the Dynatrace OneAgent then you only need to enable the plugin but need no further configuration. The Dynatrace telegraf output plugin will send all metrics to the OneAgent which will use its secure and load balanced connection to send the metrics to your Dynatrace SaaS or Managed environment. +```toml +[[outputs.dynatrace]] + ## No options are required. By default, metrics will be exported via the OneAgent on the local host. +``` -## Running standalone +### Running standalone If you run the Telegraf agent on a host or VM without a OneAgent you will need to configure the environment API endpoint to send the metrics to and an API token for security. -The endpoint for the Dynatrace Metrics API is +You will also need to configure an API token for secure access. Find out how to create a token in the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a new token with the +'Ingest metrics' (`metrics.ingest`) scope enabled. It is recommended to limit the token scope to only this permission. -* Managed https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest -* SaaS https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest +The endpoint for the Dynatrace Metrics API v2 is -You can learn more about how to use the Dynatrace API [here](https://www.dynatrace.com/support/help/dynatrace-api/) +* on Dynatrace Managed: `https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest` +* on Dynatrace SaaS: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest` -You will also need to configure an API token for secure access. Find out how to create a token [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/tokens/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a token with Dynatrace API and create a new token with -'Ingest metrics data points' access scope enabled. +```toml +[[outputs.dynatrace]] + ## If no OneAgent is running on the host, url and api_token need to be set + + ## Dynatrace Metrics Ingest v2 endpoint to receive metrics + url = "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" + + ## API token is required if a URL is specified and should be restricted to the 'Ingest metrics' scope + api_token = "your API token here" # hard-coded for illustration only, should be read from environment +``` + +You can learn more about how to use the Dynatrace API [here](https://www.dynatrace.com/support/help/dynatrace-api/). ## Configuration @@ -31,17 +63,81 @@ You will also need to configure an API token for secure access. Find out how to ## Set Dynatrace environment URL (e.g.: https://YOUR_DOMAIN/api/v2/metrics/ingest) if you do not use a OneAgent url = "" api_token = "" - ## Optional prefix for metric names (e.g.: "telegraf.") - prefix = "telegraf." 
+ ## Optional prefix for metric names (e.g.: "telegraf") + prefix = "telegraf" ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default insecure_skip_verify = false + ## If you want metrics to be treated and reported as delta counters, add the metric names here + additional_counters = [ ] + ## Optional dimensions to be added to every metric + [outputs.dynatrace.default_dimensions] + default_key = "default value" ``` -## Requirements +### `url` + +*required*: `false` -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. Monotonic counters (e.g. diskio.reads, system.uptime) require release 208 or later. -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher +*default*: Local OneAgent endpoint + +Set your Dynatrace environment URL (e.g.: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest`, see the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/post-ingest-metrics/) for details) if you do not use a OneAgent or wish to export metrics directly to a Dynatrace metrics v2 endpoint. If a URL is set to anything other than the local OneAgent endpoint, then an API token is required. + +```toml +url = "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +``` + +### `api_token` + +*required*: `false` unless `url` is specified + +API token is required if a URL other than the OneAgent endpoint is specified and it should be restricted to the 'Ingest metrics' scope. + +```toml +api_token = "your API token here" +``` + +### `prefix` + +*required*: `false` + +Optional prefix to be prepended to all metric names (will be separated with a `.`). + +```toml +prefix = "telegraf" +``` + +### `insecure_skip_verify` + +*required*: `false` + +Setting this option to true skips TLS verification for testing or when using self-signed certificates. + +```toml +insecure_skip_verify = false +``` + +### `additional_counters` + +*required*: `false` + +If you want a metric to be treated and reported as a delta counter, add its name to this list. + +```toml +additional_counters = [ ] +``` + +### `default_dimensions` + +*required*: `false` + +Default dimensions that will be added to every exported metric. + +```toml +[outputs.dynatrace.default_dimensions] +default_key = "default value" +``` ## Limitations -Telegraf measurements which can't be converted to a float64 are skipped. + +Telegraf measurements which can't be converted to a number are skipped. 
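To make the two options above concrete, here is a minimal sketch of a config that combines `additional_counters` and `default_dimensions`; the measurement name `requests`, its fields `count` and `latency`, and the `environment` dimension are illustrative only:

```toml
[[outputs.dynatrace]]
  ## Report the "count" field of the "requests" measurement as a delta counter
  additional_counters = [ "requests.count" ]

  ## Add this dimension to every exported metric
  [outputs.dynatrace.default_dimensions]
  environment = "staging"
```

With this config, the plugin would produce ingest lines of roughly the following shape (the `<measurement>.<field>` naming, the `count,delta=<value>` and `gauge,<value>` forms, the static `dt.metrics.source=telegraf` dimension, and the millisecond timestamp all match what the tests in this change assert; dimension order is not guaranteed):

```text
requests.count,environment=staging,dt.metrics.source=telegraf count,delta=5 1289430000000
requests.latency,environment=staging,dt.metrics.source=telegraf gauge,3.14 1289430000000
```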
diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 8c8fa984d82a8..adf74ea48a232 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -3,45 +3,39 @@ package dynatrace import ( "bytes" "fmt" - "io/ioutil" - "math" + "io" "net/http" - "regexp" - "sort" - "strconv" "strings" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" -) - -const ( - oneAgentMetricsUrl = "http://127.0.0.1:14499/metrics/ingest" -) -var ( - reNameAllowedCharList = regexp.MustCompile("[^A-Za-z0-9.-]+") - maxDimKeyLen = 100 - maxMetricKeyLen = 250 + dtMetric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/apiconstants" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" ) -var counts map[string]string -var sent = 0 - // Dynatrace Configuration for the Dynatrace output plugin type Dynatrace struct { - URL string `toml:"url"` - APIToken string `toml:"api_token"` - Prefix string `toml:"prefix"` - Log telegraf.Logger `toml:"-"` - Timeout internal.Duration `toml:"timeout"` + URL string `toml:"url"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + Log telegraf.Logger `toml:"-"` + Timeout config.Duration `toml:"timeout"` + AddCounterMetrics []string `toml:"additional_counters"` + DefaultDimensions map[string]string `toml:"default_dimensions"` + + normalizedDefaultDimensions dimensions.NormalizedDimensionList + normalizedStaticDimensions dimensions.NormalizedDimensionList tls.ClientConfig client *http.Client + + loggedMetrics map[string]bool // New empty set } const sampleConfig = ` @@ -60,8 +54,8 @@ const sampleConfig = ` ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. api_token = "" - ## Optional prefix for metric names (e.g.: "telegraf.") - prefix = "telegraf." + ## Optional prefix for metric names (e.g.: "telegraf") + prefix = "telegraf" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -74,6 +68,13 @@ const sampleConfig = ` ## Connection timeout, defaults to "5s" if not set. timeout = "5s" + + ## If you want metrics to be treated and reported as delta counters, add the metric names here + additional_counters = [ ] + + ## Optional dimensions to be added to every metric + # [outputs.dynatrace.default_dimensions] + # default_key = "default value" ` // Connect Connects the Dynatrace output plugin to the Telegraf stream @@ -97,135 +98,96 @@ func (d *Dynatrace) Description() string { return "Send telegraf metrics to a Dynatrace environment" } -// Normalizes a metric keys or metric dimension identifiers -// according to Dynatrace format. 
-func (d *Dynatrace) normalize(s string, max int) (string, error) { - s = reNameAllowedCharList.ReplaceAllString(s, "_") - - // Strip Digits and underscores if they are at the beginning of the string - normalizedString := strings.TrimLeft(s, "_0123456789") - - for strings.HasPrefix(normalizedString, "_") { - normalizedString = normalizedString[1:] - } - - if len(normalizedString) > max { - normalizedString = normalizedString[:max] - } - - for strings.HasSuffix(normalizedString, "_") { - normalizedString = normalizedString[:len(normalizedString)-1] - } - - if len(normalizedString) == 0 { - return "", fmt.Errorf("error normalizing the string: %s", s) - } - return normalizedString, nil -} - -func (d *Dynatrace) escape(v string) string { - return strconv.Quote(v) -} - func (d *Dynatrace) Write(metrics []telegraf.Metric) error { - var buf bytes.Buffer - var tagb bytes.Buffer if len(metrics) == 0 { return nil } - for _, metric := range metrics { - // first write the tags into a buffer - tagb.Reset() - if len(metric.Tags()) > 0 { - keys := make([]string, 0, len(metric.Tags())) - for k := range metric.Tags() { - keys = append(keys, k) - } - // sort tag keys to expect the same order in ech run - sort.Strings(keys) + lines := []string{} - for _, k := range keys { - tagKey, err := d.normalize(k, maxDimKeyLen) - if err != nil { + for _, tm := range metrics { + dims := []dimensions.Dimension{} + for _, tag := range tm.TagList() { + // Ignore special tags for histogram and summary types. + switch tm.Type() { + case telegraf.Histogram: + if tag.Key == "le" || tag.Key == "gt" { continue } - fmt.Fprintf(&tagb, ",%s=%s", strings.ToLower(tagKey), d.escape(metric.Tags()[k])) - - } - } - if len(metric.Fields()) > 0 { - for k, v := range metric.Fields() { - var value string - switch v := v.(type) { - case string: - continue - case float64: - if !math.IsNaN(v) && !math.IsInf(v, 0) { - value = fmt.Sprintf("%f", v) - } else { - continue - } - case uint64: - value = strconv.FormatUint(v, 10) - case int64: - value = strconv.FormatInt(v, 10) - case bool: - if v { - value = "1" - } else { - value = "0" - } - default: - d.Log.Debugf("Dynatrace type not supported! %s", v) + case telegraf.Summary: + if tag.Key == "quantile" { continue } + } + dims = append(dims, dimensions.NewDimension(tag.Key, tag.Value)) + } - // metric name - metricKey, err := d.normalize(k, maxMetricKeyLen) - if err != nil { - continue - } + for _, field := range tm.FieldList() { + metricName := tm.Name() + "." + field.Key - metricID, err := d.normalize(d.Prefix+metric.Name()+"."+metricKey, maxMetricKeyLen) - // write metric name combined with its field - if err != nil { - continue - } - // write metric id,tags and value - switch metric.Type() { - case telegraf.Counter: - if lastvalue, ok := counts[metricID+tagb.String()]; ok { - // only send a counter if a lastvalue is found in the map - // if last value is found we can calc and send the delta value - if v, err := strconv.ParseFloat(lastvalue, 32); err == nil { - if v2, err := strconv.ParseFloat(value, 32); err == nil { - fmt.Fprintf(&buf, "%s%s count,delta=%f\n", metricID, tagb.String(), v2-v) - } - } - } - // put the current value into the map as last value - counts[metricID+tagb.String()] = value - default: - fmt.Fprintf(&buf, "%s%s %v\n", metricID, tagb.String(), value) + typeOpt := d.getTypeOption(tm, field) + + if typeOpt == nil { + // Unsupported type. 
Log only once per unsupported metric name + if !d.loggedMetrics[metricName] { + d.Log.Warnf("Unsupported type for %s", metricName) + d.loggedMetrics[metricName] = true } + continue } + + name := tm.Name() + "." + field.Key + dm, err := dtMetric.NewMetric( + name, + dtMetric.WithPrefix(d.Prefix), + dtMetric.WithDimensions( + dimensions.MergeLists( + d.normalizedDefaultDimensions, + dimensions.NewNormalizedDimensionList(dims...), + d.normalizedStaticDimensions, + ), + ), + dtMetric.WithTimestamp(tm.Time()), + typeOpt, + ) + + if err != nil { + d.Log.Warn(fmt.Sprintf("failed to normalize metric: %s - %s", name, err.Error())) + continue + } + + line, err := dm.Serialize() + + if err != nil { + d.Log.Warn(fmt.Sprintf("failed to serialize metric: %s - %s", name, err.Error())) + continue + } + + lines = append(lines, line) } } - sent++ - // in typical interval of 10s, we will clean the counter state once in 24h which is 8640 iterations - if sent%8640 == 0 { - counts = make(map[string]string) + + limit := apiconstants.GetPayloadLinesLimit() + for i := 0; i < len(lines); i += limit { + batch := lines[i:min(i+limit, len(lines))] + + output := strings.Join(batch, "\n") + if output != "" { + if err := d.send(output); err != nil { + return fmt.Errorf("error processing data: %s", err.Error()) + } + } } - return d.send(buf.Bytes()) + + return nil } -func (d *Dynatrace) send(msg []byte) error { +func (d *Dynatrace) send(msg string) error { var err error - req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg)) + req, err := http.NewRequest("POST", d.URL, bytes.NewBufferString(msg)) if err != nil { d.Log.Errorf("Dynatrace error: %s", err.Error()) - return fmt.Errorf("Dynatrace error while creating HTTP request:, %s", err.Error()) + return fmt.Errorf("error while creating HTTP request: %s", err.Error()) } req.Header.Add("Content-Type", "text/plain; charset=UTF-8") @@ -238,33 +200,31 @@ func (d *Dynatrace) send(msg string) error { resp, err := d.client.Do(req) if err != nil { d.Log.Errorf("Dynatrace error: %s", err.Error()) - fmt.Println(req) - return fmt.Errorf("Dynatrace error while sending HTTP request:, %s", err.Error()) + return fmt.Errorf("error while sending HTTP request: %s", err.Error()) } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusBadRequest { + return fmt.Errorf("request failed with response code: %d", resp.StatusCode) + } + // print metric line results as info log - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusAccepted { - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - d.Log.Errorf("Dynatrace error reading response") - } - bodyString := string(bodyBytes) - d.Log.Debugf("Dynatrace returned: %s", bodyString) - } else { - return fmt.Errorf("Dynatrace request failed with response code:, %d", resp.StatusCode) + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + d.Log.Errorf("Dynatrace error reading response") } + bodyString := string(bodyBytes) + d.Log.Debugf("Dynatrace returned: %s", bodyString) return nil } func (d *Dynatrace) Init() error { - counts = make(map[string]string) if len(d.URL) == 0 { d.Log.Infof("Dynatrace URL is empty, defaulting to OneAgent metrics interface") - d.URL = oneAgentMetricsUrl + d.URL = apiconstants.GetDefaultOneAgentEndpoint() } - if d.URL != oneAgentMetricsUrl && len(d.APIToken) == 0 { + if d.URL != apiconstants.GetDefaultOneAgentEndpoint() && len(d.APIToken) == 0 { d.Log.Errorf("Dynatrace api_token is a required 
field for Dynatrace output") return fmt.Errorf("api_token is a required field for Dynatrace output") } @@ -279,15 +239,66 @@ func (d *Dynatrace) Init() error { Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsCfg, }, - Timeout: d.Timeout.Duration, + Timeout: time.Duration(d.Timeout), } + + dims := []dimensions.Dimension{} + for key, value := range d.DefaultDimensions { + dims = append(dims, dimensions.NewDimension(key, value)) + } + d.normalizedDefaultDimensions = dimensions.NewNormalizedDimensionList(dims...) + d.normalizedStaticDimensions = dimensions.NewNormalizedDimensionList(dimensions.NewDimension("dt.metrics.source", "telegraf")) + d.loggedMetrics = make(map[string]bool) + return nil } func init() { outputs.Add("dynatrace", func() telegraf.Output { return &Dynatrace{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } + +func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dtMetric.MetricOption { + metricName := metric.Name() + "." + field.Key + for _, i := range d.AddCounterMetrics { + if metricName != i { + continue + } + switch v := field.Value.(type) { + case float64: + return dtMetric.WithFloatCounterValueDelta(v) + case uint64: + return dtMetric.WithIntCounterValueDelta(int64(v)) + case int64: + return dtMetric.WithIntCounterValueDelta(v) + default: + return nil + } + } + + switch v := field.Value.(type) { + case float64: + return dtMetric.WithFloatGaugeValue(v) + case uint64: + return dtMetric.WithIntGaugeValue(int64(v)) + case int64: + return dtMetric.WithIntGaugeValue(v) + case bool: + if v { + return dtMetric.WithIntGaugeValue(1) + } + return dtMetric.WithIntGaugeValue(0) + } + + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index cf6549c72ff11..0ed7cf4cf1195 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -2,27 +2,35 @@ package dynatrace import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" - "io/ioutil" + "fmt" + "io" "net/http" "net/http/httptest" + "regexp" + "sort" + "strings" "testing" "time" + + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/apiconstants" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestNilMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() d := &Dynatrace{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } d.URL = ts.URL @@ -41,7 +49,8 @@ func TestNilMetrics(t *testing.T) { func TestEmptyMetricsSlice(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - 
json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -64,7 +73,8 @@ func TestEmptyMetricsSlice(t *testing.T) { func TestMockURL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -87,9 +97,10 @@ func TestMissingURL(t *testing.T) { d.Log = testutil.Logger{} err := d.Init() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.NoError(t, err) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) err = d.Connect() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) require.NoError(t, err) } @@ -98,9 +109,10 @@ func TestMissingAPITokenMissingURL(t *testing.T) { d.Log = testutil.Logger{} err := d.Init() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.NoError(t, err) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) err = d.Connect() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) require.NoError(t, err) } @@ -113,28 +125,38 @@ func TestMissingAPIToken(t *testing.T) { require.Error(t, err) } -func TestSendMetric(t *testing.T) { +func TestSendMetrics(t *testing.T) { + expected := []string{} + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,host=\"192.168.0.1\",nix=\"nix\" 3.140000\nmymeasurement.value,host=\"192.168.0.1\" 3.140000\n" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) + + lines := strings.Split(bodyString, "\n") + + sort.Strings(lines) + sort.Strings(expected) + + expectedString := strings.Join(expected, "\n") + foundString := strings.Join(lines, "\n") + if foundString != expectedString { + t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expectedString, foundString) } w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))) + require.NoError(t, err) })) defer ts.Close() - d := &Dynatrace{} + d := &Dynatrace{ + URL: ts.URL, + APIToken: "123", + Log: testutil.Logger{}, + AddCounterMetrics: []string{}, + } - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} err := d.Init() require.NoError(t, err) err = d.Connect() @@ -142,21 +164,43 @@ func TestSendMetric(t *testing.T) { // Init metrics - m1, _ := metric.New( - "mymeasurement", - map[string]string{"host": "192.168.0.1", "nix": "nix"}, - map[string]interface{}{"myfield": float64(3.14)}, + // Simple metrics are exported as a gauge unless in additional_counters + expected = append(expected, "simple_metric.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "simple_metric.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "simple_metric.counter") + m1 := metric.New( + "simple_metric", + map[string]string{}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( - "mymeasurement", - map[string]string{"host": "192.168.0.1"}, - map[string]interface{}{"value": float64(3.14)}, + // Even if Type() returns counter, all metrics are treated as a gauge unless explicitly added to additional_counters + expected = append(expected, "counter_type.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "counter_type.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "counter_type.counter") + m2 := metric.New( + "counter_type", + map[string]string{}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + telegraf.Counter, ) - metrics := []telegraf.Metric{m1, m2} + expected = append(expected, "complex_metric.int,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.int64,dt.metrics.source=telegraf gauge,2 1289430000000") + expected = append(expected, "complex_metric.float,dt.metrics.source=telegraf gauge,3 1289430000000") + expected = append(expected, "complex_metric.float64,dt.metrics.source=telegraf gauge,4 1289430000000") + expected = append(expected, "complex_metric.true,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.false,dt.metrics.source=telegraf gauge,0 1289430000000") + m3 := metric.New( + "complex_metric", + map[string]string{}, + map[string]interface{}{"int": 1, "int64": int64(2), "float": 3.0, "float64": float64(4.0), "true": true, "false": false}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1, m2, m3} err = d.Write(metrics) require.NoError(t, err) @@ -165,17 +209,20 @@ func TestSendMetric(t *testing.T) { func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) bodyString := string(bodyBytes) - expected := 
"mymeasurement.myfield,a=\"test\",b=\"test\",c=\"test\" 3.140000\n" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) - } + // use regex because dimension order isn't guaranteed + require.Equal(t, len(bodyString), 94) + require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) + require.Regexp(t, regexp.MustCompile(`a=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`b=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`c=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`dt.metrics.source=telegraf`), bodyString) + require.Regexp(t, regexp.MustCompile(`gauge,3.14 1289430000000$`), bodyString) w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -191,7 +238,7 @@ func TestSendSingleMetricWithUnorderedTags(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"a": "test", "c": "test", "b": "test"}, map[string]interface{}{"myfield": float64(3.14)}, @@ -208,16 +255,15 @@ func TestSendMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield 3.140000\n" + expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) + t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -233,7 +279,7 @@ func TestSendMetricWithoutTags(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{}, map[string]interface{}{"myfield": float64(3.14)}, @@ -250,16 +296,21 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,aaa=\"test\",b_b=\"test\",ccc=\"test\" 3.140000\n" - if bodyString != expected { - t.Errorf("Metric encoding failed. 
expected: %s but got: %s", expected, bodyString) - } - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + + // use regex because dimension order isn't guaranteed + require.Equal(t, len(bodyString), 100) + require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) + require.Regexp(t, regexp.MustCompile(`aaa=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`b_b=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`ccc=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`dt.metrics.source=telegraf`), bodyString) + require.Regexp(t, regexp.MustCompile(`gauge,3.14 1289430000000$`), bodyString) + + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -275,7 +326,7 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"AAA": "test", "CcC": "test", "B B": "test"}, map[string]interface{}{"myfield": float64(3.14)}, @@ -292,16 +343,15 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield 1\n" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) - } - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 132) + require.Contains(t, bodyString, "mymeasurement.yes,dt.metrics.source=telegraf gauge,1 1289430000000") + require.Contains(t, bodyString, "mymeasurement.no,dt.metrics.source=telegraf gauge,0 1289430000000") + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -317,10 +367,184 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"yes": true, "no": false}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +func TestSendMetricWithDefaultDimensions(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 78) + require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) + require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) + require.Regexp(t, regexp.MustCompile("dim=value"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,2 1289430000000$"), bodyString) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{DefaultDimensions: map[string]string{"dim": "value"}} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = 
d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"value": 2}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +func TestMetricDimensionsOverrideDefault(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 80) + require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) + require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) + require.Regexp(t, regexp.MustCompile("dim=metric"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{DefaultDimensions: map[string]string{"dim": "default"}} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{"dim": "metric"}, + map[string]interface{}{"value": 32}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +func TestStaticDimensionsOverrideMetric(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 53) + require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) + require.Regexp(t, regexp.MustCompile("dim=static"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{DefaultDimensions: map[string]string{"dim": "default"}} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + d.normalizedStaticDimensions = dimensions.NewNormalizedDimensionList(dimensions.NewDimension("dim", "static")) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{"dim": "metric"}, + map[string]interface{}{"value": 32}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +var warnfCalledTimes int + +type loggerStub struct { + testutil.Logger +} + +func (l loggerStub) Warnf(format string, args ...interface{}) { + warnfCalledTimes++ +} + +func TestSendUnsupportedMetric(t *testing.T) { + warnfCalledTimes = 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Fatal("should not export because the only metric is an invalid type") + })) + defer ts.Close() + + d := &Dynatrace{} + + logStub := 
loggerStub{} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = logStub + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( "mymeasurement", map[string]string{}, - map[string]interface{}{"myfield": bool(true)}, + map[string]interface{}{"metric1": "unsupported_type"}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) @@ -328,4 +552,30 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { err = d.Write(metrics) require.NoError(t, err) + // Warnf called for invalid export + require.Equal(t, 1, warnfCalledTimes) + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf skipped for more invalid exports with the same name + require.Equal(t, 1, warnfCalledTimes) + + m2 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"metric2": "unsupported_type"}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics = []telegraf.Metric{m2} + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf called again for invalid export with a new metric name + require.Equal(t, 2, warnfCalledTimes) + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf skipped for more invalid exports with the same name + require.Equal(t, 2, warnfCalledTimes) } diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index b17a945b3523c..8f57f4e12ebf5 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -14,7 +14,7 @@ import ( "crypto/sha256" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "gopkg.in/olivere/elastic.v5" @@ -28,12 +28,13 @@ type Elasticsearch struct { Username string Password string EnableSniffer bool - Timeout internal.Duration - HealthCheckInterval internal.Duration + Timeout config.Duration + HealthCheckInterval config.Duration + EnableGzip bool ManageTemplate bool TemplateName string OverwriteTemplate bool - ForceDocumentId bool + ForceDocumentID bool `toml:"force_document_id"` MajorReleaseNumber int tls.ClientConfig @@ -50,6 +51,8 @@ var sampleConfig = ` ## Set to true to ask Elasticsearch a list of all cluster nodes, ## thus it is not necessary to list all nodes in the urls config option. 
enable_sniffer = false + ## Set to true to enable gzip compression + enable_gzip = false ## Set the interval to check if the Elasticsearch nodes are available ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" @@ -174,7 +177,7 @@ func (a *Elasticsearch) Connect() error { return fmt.Errorf("Elasticsearch urls or index_name is not defined") } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() var clientOptions []elastic.ClientOptionFunc @@ -189,14 +192,15 @@ func (a *Elasticsearch) Connect() error { httpclient := &http.Client{ Transport: tr, - Timeout: a.Timeout.Duration, + Timeout: time.Duration(a.Timeout), } clientOptions = append(clientOptions, elastic.SetHttpClient(httpclient), elastic.SetSniff(a.EnableSniffer), elastic.SetURL(a.URLs...), - elastic.SetHealthcheckInterval(a.HealthCheckInterval.Duration), + elastic.SetHealthcheckInterval(time.Duration(a.HealthCheckInterval)), + elastic.SetGzip(a.EnableGzip), ) if a.Username != "" && a.Password != "" { @@ -205,7 +209,7 @@ func (a *Elasticsearch) Connect() error { ) } - if a.HealthCheckInterval.Duration == 0 { + if time.Duration(a.HealthCheckInterval) == 0 { clientOptions = append(clientOptions, elastic.SetHealthcheck(false), ) @@ -250,7 +254,6 @@ func (a *Elasticsearch) Connect() error { // GetPointID generates a unique ID for a Metric Point func GetPointID(m telegraf.Metric) string { - var buffer bytes.Buffer //Timestamp(ns),measurement name and Series Hash for compute the final SHA256 based hash ID @@ -284,7 +287,7 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { br := elastic.NewBulkIndexRequest().Index(indexName).Doc(m) - if a.ForceDocumentId { + if a.ForceDocumentID { id := GetPointID(metric) br.Id(id) } @@ -294,10 +297,9 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { } bulkRequest.Add(br) - } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() res, err := bulkRequest.Do(ctx) @@ -309,12 +311,12 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { if res.Errors { for id, err := range res.Failed() { log.Printf("E! Elasticsearch indexing failure, id: %d, error: %s, caused by: %s, %s", id, err.Error.Reason, err.Error.CausedBy["reason"], err.Error.CausedBy["type"]) + break } return fmt.Errorf("W! Elasticsearch failed to index %d metrics", len(res.Failed())) } return nil - } func (a *Elasticsearch) manageTemplate(ctx context.Context) error { @@ -359,17 +361,13 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { } log.Printf("D! Elasticsearch template %s created or updated\n", a.TemplateName) - } else { - log.Println("D! Found existing Elasticsearch template. Skipping template management") - } return nil } func (a *Elasticsearch) GetTagKeys(indexName string) (string, []string) { - tagKeys := []string{} startTag := strings.Index(indexName, "{{") @@ -378,7 +376,6 @@ func (a *Elasticsearch) GetTagKeys(indexName string) (string, []string) { if endTag < 0 { startTag = -1 - } else { tagName := indexName[startTag+2 : endTag] @@ -422,7 +419,6 @@ func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time, tagK } return fmt.Sprintf(indexName, tagValues...) 
- } func getISOWeek(eventTime time.Time) string { @@ -446,8 +442,8 @@ func (a *Elasticsearch) Close() error { func init() { outputs.Add("elasticsearch", func() telegraf.Output { return &Elasticsearch{ - Timeout: internal.Duration{Duration: time.Second * 5}, - HealthCheckInterval: internal.Duration{Duration: time.Second * 10}, + Timeout: config.Duration(time.Second * 5), + HealthCheckInterval: config.Duration(time.Second * 10), } }) } diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index e2a583402dfcc..7ad1e632c6d20 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -2,16 +2,18 @@ package elasticsearch import ( "context" + "net/http" + "net/http/httptest" "reflect" "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -21,11 +23,12 @@ func TestConnectAndWrite(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "test-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: false, - HealthCheckInterval: internal.Duration{Duration: time.Second * 10}, + HealthCheckInterval: config.Duration(time.Second * 10), } // Verify that we can connect to Elasticsearch @@ -35,10 +38,9 @@ func TestConnectAndWrite(t *testing.T) { // Verify that we can successfully write data to Elasticsearch err = e.Write(testutil.MockMetrics()) require.NoError(t, err) - } -func TestTemplateManagementEmptyTemplate(t *testing.T) { +func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -50,7 +52,8 @@ func TestTemplateManagementEmptyTemplate(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "test-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "", OverwriteTemplate: true, @@ -58,10 +61,9 @@ func TestTemplateManagementEmptyTemplate(t *testing.T) { err := e.manageTemplate(ctx) require.Error(t, err) - } -func TestTemplateManagement(t *testing.T) { +func TestTemplateManagementIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -71,13 +73,14 @@ func TestTemplateManagement(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "test-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, } - ctx, cancel := context.WithTimeout(context.Background(), e.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) defer cancel() err := e.Connect() @@ -87,7 +90,7 @@ func TestTemplateManagement(t *testing.T) { require.NoError(t, err) } -func TestTemplateInvalidIndexPattern(t *testing.T) { +func TestTemplateInvalidIndexPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -97,7 +100,8 @@ func TestTemplateInvalidIndexPattern(t *testing.T) 
{ e := &Elasticsearch{ URLs: urls, IndexName: "{{host}}-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, @@ -164,7 +168,6 @@ func TestGetTagKeys(t *testing.T) { t.Errorf("Expected tagKeys %s, got %s\n", test.ExpectedTagKeys, tagKeys) } } - } func TestGetIndexName(t *testing.T) { @@ -257,3 +260,70 @@ func TestGetIndexName(t *testing.T) { } } } + +func TestRequestHeaderWhenGzipIsEnabled(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_bulk": + require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) + require.Equal(t, "gzip", r.Header.Get("Accept-Encoding")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) + return + default: + _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + urls := []string{"http://" + ts.Listener.Addr().String()} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "{{host}}-%Y.%m.%d", + Timeout: config.Duration(time.Second * 5), + EnableGzip: true, + ManageTemplate: false, + } + + err := e.Connect() + require.NoError(t, err) + + err = e.Write(testutil.MockMetrics()) + require.NoError(t, err) +} + +func TestRequestHeaderWhenGzipIsDisabled(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_bulk": + require.NotEqual(t, "gzip", r.Header.Get("Content-Encoding")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) + return + default: + _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + urls := []string{"http://" + ts.Listener.Addr().String()} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "{{host}}-%Y.%m.%d", + Timeout: config.Duration(time.Second * 5), + EnableGzip: false, + ManageTemplate: false, + } + + err := e.Connect() + require.NoError(t, err) + + err = e.Write(testutil.MockMetrics()) + require.NoError(t, err) +} diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md index d82676a251e4e..7e19b9a8475c6 100644 --- a/plugins/outputs/exec/README.md +++ b/plugins/outputs/exec/README.md @@ -8,6 +8,8 @@ The command should be defined similar to docker's `exec` form: On non-zero exit stderr will be logged at error level. +For better performance, consider execd, which runs continuously. + ### Configuration ```toml diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index d3697627e5f92..b0313a382045a 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -6,9 +6,11 @@ import ( "io" "log" "os/exec" + "runtime" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" @@ -18,8 +20,8 @@ const maxStderrBytes = 512 // Exec defines the exec output plugin. type Exec struct { - Command []string `toml:"command"` - Timeout internal.Duration `toml:"timeout"` + Command []string `toml:"command"` + Timeout config.Duration `toml:"timeout"` runner Runner serializer serializers.Serializer @@ -39,6 +41,10 @@ var sampleConfig = ` # data_format = "influx" ` +func (e *Exec) Init() error { + return nil +} + // SetSerializer sets the serializer for the output. 
func (e *Exec) SetSerializer(serializer serializers.Serializer) { e.serializer = serializer @@ -77,7 +83,7 @@ func (e *Exec) Write(metrics []telegraf.Metric) error { return nil } - return e.runner.Run(e.Timeout.Duration, e.Command, &buffer) + return e.runner.Run(time.Duration(e.Timeout), e.Command, &buffer) } // Runner provides an interface for running exec.Cmd. @@ -101,12 +107,17 @@ func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.R s := stderr if err != nil { - if err == internal.TimeoutErr { + if err == internal.ErrTimeout { return fmt.Errorf("%q timed out and was killed", command) } + s = removeWindowsCarriageReturns(s) if s.Len() > 0 { - log.Printf("E! [outputs.exec] Command error: %q", truncate(s)) + if !telegraf.Debug { + log.Printf("E! [outputs.exec] Command error: %q", c.truncate(s)) + } else { + log.Printf("D! [outputs.exec] Command error: %q", s) + } } if status, ok := internal.ExitStatus(err); ok { @@ -121,7 +132,7 @@ func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.R return nil } -func truncate(buf bytes.Buffer) string { +func (c *CommandRunner) truncate(buf bytes.Buffer) string { // Limit the number of bytes. didTruncate := false if buf.Len() > maxStderrBytes { @@ -145,7 +156,26 @@ func init() { outputs.Add("exec", func() telegraf.Output { return &Exec{ runner: &CommandRunner{}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } + +// removeWindowsCarriageReturns removes all carriage returns from the input if the +// OS is Windows. It does not return any errors. +func removeWindowsCarriageReturns(b bytes.Buffer) bytes.Buffer { + if runtime.GOOS == "windows" { + var buf bytes.Buffer + for { + byt, err := b.ReadBytes(0x0D) + byt = bytes.TrimRight(byt, "\x0d") + if len(byt) > 0 { + _, _ = buf.Write(byt) + } + if err == io.EOF { + return buf + } + } + } + return b +} diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index 850ba7328a03b..e75e1829d3894 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -6,18 +6,15 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestExec(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to OS/executable dependencies") - } + t.Skip("Skipping test due to OS/executable dependencies and race condition when run as part of a test-all") tests := []struct { name string @@ -55,7 +52,7 @@ func TestExec(t *testing.T) { t.Run(tt.name, func(t *testing.T) { e := &Exec{ Command: tt.command, - Timeout: internal.Duration{Duration: time.Second}, + Timeout: config.Duration(time.Second), runner: &CommandRunner{}, } @@ -86,9 +83,10 @@ func TestTruncate(t *testing.T) { len: len("hola") + len("..."), }, } + c := CommandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := truncate(*tt.buf) + s := c.truncate(*tt.buf) require.Equal(t, tt.len, len(s)) }) } diff --git a/plugins/outputs/execd/execd_test.go b/plugins/outputs/execd/execd_test.go index 46bde795ec2ed..c14339d31a85a 100644 --- a/plugins/outputs/execd/execd_test.go +++ b/plugins/outputs/execd/execd_test.go @@ -55,13 +55,12 @@ func TestExternalOutputWorks(t *testing.T) { wg.Done() } - m, err := metric.New( + m 
:= metric.New( "cpu", map[string]string{"name": "cpu1"}, map[string]interface{}{"idle": 50, "sys": 30}, now, ) - require.NoError(t, err) require.NoError(t, e.Connect()) require.NoError(t, e.Write([]telegraf.Metric{m})) diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 3798f107aa157..0c8ff903e97bc 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -4,21 +4,22 @@ import ( "fmt" "io" "os" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) type File struct { - Files []string `toml:"files"` - RotationInterval internal.Duration `toml:"rotation_interval"` - RotationMaxSize internal.Size `toml:"rotation_max_size"` - RotationMaxArchives int `toml:"rotation_max_archives"` - UseBatchFormat bool `toml:"use_batch_format"` - Log telegraf.Logger `toml:"-"` + Files []string `toml:"files"` + RotationInterval config.Duration `toml:"rotation_interval"` + RotationMaxSize config.Size `toml:"rotation_max_size"` + RotationMaxArchives int `toml:"rotation_max_archives"` + UseBatchFormat bool `toml:"use_batch_format"` + Log telegraf.Logger `toml:"-"` writer io.Writer closers []io.Closer @@ -69,7 +70,7 @@ func (f *File) Connect() error { writers = append(writers, os.Stdout) } else { of, err := rotate.NewFileWriter( - file, f.RotationInterval.Duration, f.RotationMaxSize.Size, f.RotationMaxArchives) + file, time.Duration(f.RotationInterval), int64(f.RotationMaxSize), f.RotationMaxArchives) if err != nil { return err } @@ -102,7 +103,7 @@ func (f *File) Description() string { } func (f *File) Write(metrics []telegraf.Metric) error { - var writeErr error = nil + var writeErr error if f.UseBatchFormat { octets, err := f.serializer.SerializeBatch(metrics) @@ -123,7 +124,7 @@ func (f *File) Write(metrics []telegraf.Metric) error { _, err = f.writer.Write(b) if err != nil { - writeErr = fmt.Errorf("E! 
[outputs.file] failed to write message: %v", err) + writeErr = fmt.Errorf("failed to write message: %v", err) } } } diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index f1e87853d6153..5fcdc511972ac 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -3,7 +3,6 @@ package file import ( "bytes" "io" - "io/ioutil" "os" "testing" @@ -181,7 +180,7 @@ func TestFileStdout(t *testing.T) { } func createFile() *os.File { - f, err := ioutil.TempFile("", "") + f, err := os.CreateTemp("", "") if err != nil { panic(err) } @@ -190,7 +189,7 @@ func createFile() *os.File { } func tmpFile() string { - d, err := ioutil.TempDir("", "") + d, err := os.MkdirTemp("", "") if err != nil { panic(err) } @@ -198,7 +197,7 @@ func tmpFile() string { } func validateFile(fname, expS string, t *testing.T) { - buf, err := ioutil.ReadFile(fname) + buf, err := os.ReadFile(fname) if err != nil { panic(err) } diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 6c871ae174580..bd35a4203385a 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -4,7 +4,6 @@ import ( "crypto/tls" "errors" "io" - "log" "math/rand" "net" "time" @@ -16,15 +15,18 @@ import ( ) type Graphite struct { - GraphiteTagSupport bool - GraphiteSeparator string + GraphiteTagSupport bool `toml:"graphite_tag_support"` + GraphiteTagSanitizeMode string `toml:"graphite_tag_sanitize_mode"` + GraphiteSeparator string `toml:"graphite_separator"` // URL is only for backwards compatibility - Servers []string - Prefix string - Template string - Templates []string - Timeout int - conns []net.Conn + Servers []string `toml:"servers"` + Prefix string `toml:"prefix"` + Template string `toml:"template"` + Templates []string `toml:"templates"` + Timeout int `toml:"timeout"` + Log telegraf.Logger `toml:"-"` + + conns []net.Conn tlsint.ClientConfig } @@ -42,6 +44,11 @@ var sampleConfig = ` ## Enable Graphite tags support # graphite_tag_support = false + ## Define how metric names and tags are sanitized; options are "strict" or "compatible" + ## strict - Default method, and backwards compatible with previous versions of Telegraf + ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec + # graphite_tag_sanitize_mode = "strict" + ## Character for separating metric name and field for Graphite tags # graphite_separator = "." @@ -124,22 +131,22 @@ func (g *Graphite) Description() string { // We can detect that by finding an eof // if not for this, we can happily write and flush without getting errors (in Go) but getting RST tcp packets back (!) // props to Tv via the authors of carbon-relay-ng` for this trick. -func checkEOF(conn net.Conn) { +func (g *Graphite) checkEOF(conn net.Conn) { b := make([]byte, 1024) conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) num, err := conn.Read(b) if err == io.EOF { - log.Printf("E! Conn %s is closed. closing conn explicitly", conn) + g.Log.Errorf("Conn %s is closed. closing conn explicitly", conn) conn.Close() return } // just in case i misunderstand something or the remote behaves badly if num != 0 { - log.Printf("I! conn %s .conn.Read data? did not expect that. data: %s\n", conn, b[:num]) + g.Log.Infof("conn %s .conn.Read data? did not expect that. data: %s", conn, b[:num]) } // Log non-timeout errors or close. if e, ok := err.(net.Error); !(ok && e.Timeout()) { - log.Printf("E!
conn %s checkEOF .conn.Read returned err != EOF, which is unexpected. closing conn. error: %s\n", conn, err) + g.Log.Errorf("conn %s checkEOF .conn.Read returned err != EOF, which is unexpected. closing conn. error: %s", conn, err) conn.Close() } } @@ -149,7 +156,7 @@ func checkEOF(conn net.Conn) { func (g *Graphite) Write(metrics []telegraf.Metric) error { // Prepare data var batch []byte - s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteSeparator, g.Templates) + s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteTagSanitizeMode, g.GraphiteSeparator, g.Templates) if err != nil { return err } @@ -157,7 +164,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := s.Serialize(metric) if err != nil { - log.Printf("E! Error serializing some metrics to graphite: %s", err.Error()) + g.Log.Errorf("Error serializing some metrics to graphite: %s", err.Error()) } batch = append(batch, buf...) } @@ -166,7 +173,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { // try to reconnect and retry to send if err != nil { - log.Println("E! Graphite: Reconnecting and retrying: ") + g.Log.Error("Graphite: Reconnecting and retrying...") g.Connect() err = g.send(batch) } @@ -176,7 +183,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { func (g *Graphite) send(batch []byte) error { // This will get set to nil if a successful write occurs - err := errors.New("Could not write to any Graphite server in cluster\n") + err := errors.New("could not write to any Graphite server in cluster") // Send data to a random server p := rand.Perm(len(g.conns)) @@ -184,13 +191,12 @@ func (g *Graphite) send(batch []byte) error { if g.Timeout > 0 { g.conns[n].SetWriteDeadline(time.Now().Add(time.Duration(g.Timeout) * time.Second)) } - checkEOF(g.conns[n]) + g.checkEOF(g.conns[n]) if _, e := g.conns[n].Write(batch); e != nil { // Error - log.Println("E! 
Graphite Error: " + e.Error()) - // Close explicitly + g.Log.Errorf("Graphite Error: " + e.Error()) + // Close explicitly and let's try the next one g.conns[n].Close() - // Let's try the next one } else { // Success err = nil diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 82aad0d7d6ee6..1cb58b19485fc 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" @@ -18,11 +20,12 @@ import ( func TestGraphiteError(t *testing.T) { // Init plugin g := Graphite{ - Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"}, + Servers: []string{"127.0.0.1:12004", "127.0.0.1:12003"}, Prefix: "my.prefix", + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"mymeasurement": float64(3.14)}, @@ -36,7 +39,7 @@ func TestGraphiteError(t *testing.T) { require.NoError(t, err1) err2 := g.Write(metrics) require.Error(t, err2) - assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error()) + assert.Equal(t, "could not write to any Graphite server in cluster", err2.Error()) } func TestGraphiteOK(t *testing.T) { @@ -48,23 +51,25 @@ func TestGraphiteOK(t *testing.T) { // Init plugin g := Graphite{ - Prefix: "my.prefix", + Prefix: "my.prefix", + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -109,22 +114,24 @@ func TestGraphiteOkWithSeparatorDot(t *testing.T) { g := Graphite{ Prefix: "my.prefix", GraphiteSeparator: ".", + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -169,22 +176,24 @@ func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) { g := Graphite{ Prefix: "my.prefix", GraphiteSeparator: "_", + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, 
map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -233,22 +242,24 @@ func TestGraphiteOKWithMultipleTemplates(t *testing.T) { "my_* host.measurement.tags.field", "measurement.tags.host.field", }, + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, map[string]interface{}{"value": float64(3.14)}, @@ -293,22 +304,24 @@ func TestGraphiteOkWithTags(t *testing.T) { g := Graphite{ Prefix: "my.prefix", GraphiteTagSupport: true, + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -354,22 +367,24 @@ func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) { Prefix: "my.prefix", GraphiteTagSupport: true, GraphiteSeparator: ".", + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -415,22 +430,24 @@ func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { Prefix: "my_prefix", GraphiteTagSupport: true, GraphiteSeparator: "_", + Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ 
-465,7 +482,8 @@ func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { } func TCPServer1(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, err := net.Listen("tcp", "127.0.0.1:12003") + require.NoError(t, err) go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -479,7 +497,7 @@ func TCPServer1(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() @@ -495,7 +513,7 @@ func TCPServer2(t *testing.T, wg *sync.WaitGroup) { } func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -509,7 +527,7 @@ func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() @@ -525,7 +543,7 @@ func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { } func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -539,7 +557,7 @@ func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() @@ -555,7 +573,7 @@ func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { } func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -569,7 +587,7 @@ func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 34f2ec6d93932..cf5dc6dc5ac3b 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -63,7 +63,6 @@ func (g *Gelf) Write(message []byte) (n int, err error) { length := compressed.Len() if length > chunksize { - chunkCountInt := int(math.Ceil(float64(length) / float64(chunksize))) id := make([]byte, 8) @@ -71,13 +70,13 @@ func (g *Gelf) Write(message []byte) (n int, err error) { for i, index := 0, 0; i < length; i, index = i+chunksize, index+1 { packet := g.createChunkedMessage(index, chunkCountInt, id, &compressed) - _, err = g.send(packet.Bytes()) + err = g.send(packet.Bytes()) if err != nil { return 0, err } } } else { - _, err = g.send(compressed.Bytes()) + err = g.send(compressed.Bytes()) if err != nil { return 0, err } @@ -134,19 +133,19 @@ func (g *Gelf) compress(b []byte) bytes.Buffer { return buf } -func (g 
*Gelf) send(b []byte) (n int, err error) { +func (g *Gelf) send(b []byte) error { udpAddr, err := net.ResolveUDPAddr("udp", g.GelfConfig.GraylogEndpoint) if err != nil { - return + return err } conn, err := net.DialUDP("udp", nil, udpAddr) if err != nil { - return + return err } - n, err = conn.Write(b) - return + _, err = conn.Write(b) + return err } type Graylog struct { @@ -215,7 +214,7 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { m := make(map[string]interface{}) m["version"] = "1.1" - m["timestamp"] = metric.Time().UnixNano() / 1000000000 + m["timestamp"] = float64(metric.Time().UnixNano()) / 1_000_000_000 m["short_message"] = "telegraf" m["name"] = metric.Name() diff --git a/plugins/outputs/health/compares.go b/plugins/outputs/health/compares.go index 9228bd2df7187..ff19da76b0ff6 100644 --- a/plugins/outputs/health/compares.go +++ b/plugins/outputs/health/compares.go @@ -68,9 +68,8 @@ func asFloat(fv interface{}) (float64, bool) { case bool: if v { return 1.0, true - } else { - return 0.0, true } + return 0.0, true default: return 0.0, false } diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index f411305616954..4541659cec030 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "errors" - "log" "net" "net/http" "net/url" @@ -12,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -68,15 +68,16 @@ type Checker interface { } type Health struct { - ServiceAddress string `toml:"service_address"` - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` + ServiceAddress string `toml:"service_address"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` tlsint.ServerConfig - Compares []*Compares `toml:"compares"` - Contains []*Contains `toml:"contains"` + Compares []*Compares `toml:"compares"` + Contains []*Contains `toml:"contains"` + Log telegraf.Logger `toml:"-"` checkers []Checker wg sync.WaitGroup @@ -141,8 +142,8 @@ func (h *Health) Connect() error { h.server = &http.Server{ Addr: h.ServiceAddress, Handler: authHandler(h), - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: h.tlsConf, } @@ -153,14 +154,14 @@ func (h *Health) Connect() error { h.origin = h.getOrigin(listener) - log.Printf("I! [outputs.health] Listening on %s", h.origin) + h.Log.Infof("Listening on %s", h.origin) h.wg.Add(1) go func() { defer h.wg.Done() err := h.server.Serve(listener) if err != http.ErrServerClosed { - log.Printf("E! 
[outputs.health] Serve error on %s: %v", h.origin, err) + h.Log.Errorf("Serve error on %s: %v", h.origin, err) } h.origin = "" }() @@ -174,12 +175,11 @@ func onAuthError(_ http.ResponseWriter) { func (h *Health) listen() (net.Listener, error) { if h.tlsConf != nil { return tls.Listen(h.network, h.address, h.tlsConf) - } else { - return net.Listen(h.network, h.address) } + return net.Listen(h.network, h.address) } -func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) { +func (h *Health) ServeHTTP(rw http.ResponseWriter, _ *http.Request) { var code = http.StatusOK if !h.isHealthy() { code = http.StatusServiceUnavailable @@ -241,7 +241,6 @@ func (h *Health) getOrigin(listener net.Listener) string { } return origin.String() } - } func (h *Health) setHealthy(healthy bool) { @@ -259,8 +258,8 @@ func (h *Health) isHealthy() bool { func NewHealth() *Health { return &Health{ ServiceAddress: defaultServiceAddress, - ReadTimeout: internal.Duration{Duration: defaultReadTimeout}, - WriteTimeout: internal.Duration{Duration: defaultWriteTimeout}, + ReadTimeout: config.Duration(defaultReadTimeout), + WriteTimeout: config.Duration(defaultWriteTimeout), healthy: true, } } diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index 5bf35ad8320e4..03a08fca21e7b 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -1,7 +1,7 @@ package health_test import ( - "io/ioutil" + "io" "net/http" "testing" "time" @@ -106,6 +106,7 @@ func TestHealth(t *testing.T) { output.ServiceAddress = "tcp://127.0.0.1:0" output.Compares = tt.options.Compares output.Contains = tt.options.Contains + output.Log = testutil.Logger{} err := output.Init() require.NoError(t, err) @@ -120,7 +121,7 @@ func TestHealth(t *testing.T) { require.NoError(t, err) require.Equal(t, tt.expectedCode, resp.StatusCode) - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) require.NoError(t, err) err = output.Close() @@ -140,6 +141,7 @@ func TestInitServiceAddress(t *testing.T) { name: "port without scheme is not allowed", plugin: &health.Health{ ServiceAddress: ":8080", + Log: testutil.Logger{}, }, err: true, }, @@ -147,6 +149,7 @@ func TestInitServiceAddress(t *testing.T) { name: "path without scheme is not allowed", plugin: &health.Health{ ServiceAddress: "/tmp/telegraf", + Log: testutil.Logger{}, }, err: true, }, @@ -154,6 +157,7 @@ func TestInitServiceAddress(t *testing.T) { name: "tcp with port maps to http", plugin: &health.Health{ ServiceAddress: "tcp://:8080", + Log: testutil.Logger{}, }, }, { @@ -161,30 +165,35 @@ func TestInitServiceAddress(t *testing.T) { plugin: &health.Health{ ServiceAddress: "tcp://:8080", ServerConfig: *pki.TLSServerConfig(), + Log: testutil.Logger{}, }, }, { name: "tcp4 is allowed", plugin: &health.Health{ ServiceAddress: "tcp4://:8080", + Log: testutil.Logger{}, }, }, { name: "tcp6 is allowed", plugin: &health.Health{ ServiceAddress: "tcp6://:8080", + Log: testutil.Logger{}, }, }, { name: "http scheme", plugin: &health.Health{ ServiceAddress: "http://:8080", + Log: testutil.Logger{}, }, }, { name: "https scheme", plugin: &health.Health{ ServiceAddress: "https://:8080", + Log: testutil.Logger{}, }, }, } @@ -192,6 +201,7 @@ func TestInitServiceAddress(t *testing.T) { t.Run(tt.name, func(t *testing.T) { output := health.NewHealth() output.ServiceAddress = tt.plugin.ServiceAddress + output.Log = testutil.Logger{} err := output.Init() if tt.err { diff --git a/plugins/outputs/http/README.md 
b/plugins/outputs/http/README.md index 0229c0e6ada7f..d90192b705a4f 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -1,7 +1,7 @@ # HTTP Output Plugin This plugin sends metrics in a HTTP message encoded using one of the output -data formats. For data_formats that support batching, metrics are sent in batch format. +data formats. For data_formats that support batching, metrics are sent in batch format. ### Configuration: @@ -34,6 +34,15 @@ data formats. For data_formats that support batching, metrics are sent in batch ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## If cookie_auth_renewal is unset or set to "0", authentication occurs once and the cookie is never renewed + # cookie_auth_renewal = "5m" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: @@ -48,4 +57,13 @@ data formats. For data_formats that support batching, metrics are sent in batch # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" + + ## Idle (keep-alive) connection timeout. + ## Maximum amount of time before idle connection is closed. + ## Zero means no limit. + # idle_conn_timeout = 0 ``` + +### Optional Cookie Authentication Settings: + +The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index d75d5ef5a4df2..c94052ea92c1c 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -1,26 +1,25 @@ package http import ( + "bufio" "bytes" "context" "fmt" "io" - "io/ioutil" "net/http" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "golang.org/x/oauth2" - "golang.org/x/oauth2/clientcredentials" ) const ( - defaultURL = "http://127.0.0.1:8080/telegraf" + maxErrMsgLen = 1024 + defaultURL = "http://127.0.0.1:8080/telegraf" ) var sampleConfig = ` @@ -50,6 +49,15 @@ var sampleConfig = ` ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## If cookie_auth_renewal is unset or set to "0", authentication occurs once and the cookie is never renewed + # cookie_auth_renewal = "5m" + ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## more about them here: @@ -64,6 +72,11 @@ var sampleConfig = ` # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" + + ## Idle (keep-alive) connection timeout. + ## Maximum amount of time before idle connection is closed. + ## Zero means no limit. + # idle_conn_timeout = 0 ` const ( @@ -74,17 +87,13 @@ const ( type HTTP struct { URL string `toml:"url"` - Timeout internal.Duration `toml:"timeout"` Method string `toml:"method"` Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` - ClientID string `toml:"client_id"` - ClientSecret string `toml:"client_secret"` - TokenURL string `toml:"token_url"` - Scopes []string `toml:"scopes"` ContentEncoding string `toml:"content_encoding"` - tls.ClientConfig + httpconfig.HTTPClientConfig + Log telegraf.Logger `toml:"-"` client *http.Client serializer serializers.Serializer @@ -94,34 +103,6 @@ func (h *HTTP) SetSerializer(serializer serializers.Serializer) { h.serializer = serializer } -func (h *HTTP) createClient(ctx context.Context) (*http.Client, error) { - tlsCfg, err := h.ClientConfig.TLSConfig() - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, - } - - if h.ClientID != "" && h.ClientSecret != "" && h.TokenURL != "" { - oauthConfig := clientcredentials.Config{ - ClientID: h.ClientID, - ClientSecret: h.ClientSecret, - TokenURL: h.TokenURL, - Scopes: h.Scopes, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, client) - client = oauthConfig.Client(ctx) - } - - return client, nil -} - func (h *HTTP) Connect() error { if h.Method == "" { h.Method = http.MethodPost @@ -131,12 +112,8 @@ func (h *HTTP) Connect() error { return fmt.Errorf("invalid method [%s] %s", h.URL, h.Method) } - if h.Timeout.Duration == 0 { - h.Timeout.Duration = defaultClientTimeout - } - ctx := context.Background() - client, err := h.createClient(ctx) + client, err := h.HTTPClientConfig.CreateClient(ctx, h.Log) if err != nil { return err } @@ -164,11 +141,7 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { return err } - if err := h.write(reqBody); err != nil { - return err - } - - return nil + return h.write(reqBody) } func (h *HTTP) write(reqBody []byte) error { @@ -210,10 +183,20 @@ func (h *HTTP) write(reqBody []byte) error { return err } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("when writing to [%s] received status code: %d", h.URL, resp.StatusCode) + errorLine := "" + scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) + if scanner.Scan() { + errorLine = scanner.Text() + } + + return fmt.Errorf("when writing to [%s] received status code: %d. 
body: %s", h.URL, resp.StatusCode, errorLine) + } + + _, err = io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } return nil @@ -222,9 +205,8 @@ func (h *HTTP) write(reqBody []byte) error { func init() { outputs.Add("http", func() telegraf.Output { return &HTTP{ - Timeout: internal.Duration{Duration: defaultClientTimeout}, - Method: defaultMethod, - URL: defaultURL, + Method: defaultMethod, + URL: defaultURL, } }) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index abcf2db33dabc..d6803eed3211d 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -3,7 +3,7 @@ package http import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -13,12 +13,14 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + oauth "github.com/influxdata/telegraf/plugins/common/oauth" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/stretchr/testify/require" ) func getMetric() telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -26,9 +28,7 @@ func getMetric() telegraf.Metric { }, time.Unix(0, 0), ) - if err != nil { - panic(err) - } + return m } @@ -272,7 +272,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) require.Contains(t, string(payload), "cpu value=42") @@ -381,11 +381,15 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { { name: "success", plugin: &HTTP{ - URL: u.String() + "/write", - ClientID: "howdy", - ClientSecret: "secret", - TokenURL: u.String() + "/token", - Scopes: []string{"urn:opc:idm:__myscopes__"}, + URL: u.String() + "/write", + HTTPClientConfig: httpconfig.HTTPClientConfig{ + OAuth2Config: oauth.OAuth2Config{ + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + }, }, tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index aefc03690a8da..36fde827e176a 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -75,7 +75,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -85,8 +85,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ``` ### Metrics - Reference the [influx serializer][] for details about metric production. 
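Stepping back to the error handling added in plugins/outputs/http above: the write path now reads at most maxErrMsgLen bytes of an error response through io.LimitReader and keeps only the first line via bufio.Scanner, so an arbitrarily large error body is never buffered in full before being quoted in the returned error. A minimal standalone sketch of that pattern, assuming an illustrative helper name (firstErrorLine) rather than the plugin's actual code:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// maxErrMsgLen mirrors the cap used in the diff above; the value is illustrative.
const maxErrMsgLen = 1024

// firstErrorLine reads at most maxErrMsgLen bytes of body and returns its
// first line, so a huge error response is never buffered in full.
func firstErrorLine(body io.Reader) string {
	scanner := bufio.NewScanner(io.LimitReader(body, maxErrMsgLen))
	if scanner.Scan() {
		return scanner.Text()
	}
	return ""
}

func main() {
	resp := strings.NewReader("partial write: field type conflict\nsecond line is never read")
	fmt.Printf("received status code: 400. body: %s\n", firstErrorLine(resp))
}
```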
- + [InfluxDB v1.x]: https://github.com/influxdata/influxdb [influx serializer]: /plugins/serializers/influx/README.md#Metrics diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 57e3e918b8202..ac85814db1f34 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -1,12 +1,13 @@ package influxdb import ( + "bytes" "context" "crypto/tls" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -20,13 +21,14 @@ import ( ) const ( - defaultRequestTimeout = time.Second * 5 - defaultDatabase = "telegraf" - errStringDatabaseNotFound = "database not found" - errStringHintedHandoffNotEmpty = "hinted handoff queue not empty" - errStringPartialWrite = "partial write" - errStringPointsBeyondRP = "points beyond retention policy" - errStringUnableToParse = "unable to parse" + defaultRequestTimeout = time.Second * 5 + defaultDatabase = "telegraf" + errStringDatabaseNotFound = "database not found" + errStringRetentionPolicyNotFound = "retention policy not found" + errStringHintedHandoffNotEmpty = "hinted handoff queue not empty" + errStringPartialWrite = "partial write" + errStringPointsBeyondRP = "points beyond retention policy" + errStringUnableToParse = "unable to parse" ) var ( @@ -216,8 +218,19 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error } defer resp.Body.Close() + body, err := c.validateResponse(resp.Body) + + // Check for poorly formatted response (can't be decoded) + if err != nil { + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: "An error response was received while attempting to create the following database: " + database + ". Error: " + err.Error(), + } + } + queryResp := &QueryResponse{} - dec := json.NewDecoder(resp.Body) + dec := json.NewDecoder(body) err = dec.Decode(queryResp) if err != nil { @@ -316,7 +329,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []telegraf.Metric) error { loc, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency) if err != nil { - return err + return fmt.Errorf("failed making write url: %s", err.Error()) } reader, err := c.requestBodyReader(metrics) @@ -327,13 +340,13 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te req, err := c.makeWriteRequest(loc, reader) if err != nil { - return err + return fmt.Errorf("failed making write req: %s", err.Error()) } resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { internal.OnClientError(c.client, err) - return err + return fmt.Errorf("failed doing req: %s", err.Error()) } defer resp.Body.Close() @@ -341,15 +354,25 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te return nil } + body, err := c.validateResponse(resp.Body) + + // Check for poorly formatted response that can't be decoded + if err != nil { + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: "An error response was received while attempting to write metrics. 
Error: " + err.Error(), + } + } + writeResp := &WriteResponse{} - dec := json.NewDecoder(resp.Body) + dec := json.NewDecoder(body) var desc string err = dec.Decode(writeResp) if err == nil { desc = writeResp.Err } - if strings.Contains(desc, errStringDatabaseNotFound) { return &DatabaseNotFoundError{ APIError: APIError{ @@ -361,6 +384,18 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te } } + // Check for any other 4xx code and drop the metric: retrying will not make the request work + if len(resp.Status) > 0 && resp.Status[0] == '4' { + c.log.Errorf("Failed to write metric (will be dropped: %s): %s", resp.Status, desc) + return nil + } + + // Handle the case of an invalid or missing retention policy + if strings.Contains(desc, errStringRetentionPolicyNotFound) { + c.log.Errorf("When writing to [%s]: received error %v", c.URL(), desc) + return nil + } + // This "error" is an informational message about the state of the // InfluxDB cluster. if strings.Contains(desc, errStringHintedHandoffNotEmpty) { @@ -426,7 +461,7 @@ func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request req, err := http.NewRequest("POST", url, body) if err != nil { - return nil, err + return nil, fmt.Errorf("failed creating new request: %s", err.Error()) } req.Header.Set("Content-Type", "text/plain; charset=utf-8") @@ -453,7 +488,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { @@ -466,6 +501,27 @@ func (c *httpClient) addHeaders(req *http.Request) { } } +func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) { + bodyBytes, err := io.ReadAll(response) + if err != nil { + return nil, err + } + defer response.Close() + + originalResponse := io.NopCloser(bytes.NewBuffer(bodyBytes)) + + // Empty response is valid. + if response == http.NoBody || len(bodyBytes) == 0 || bodyBytes == nil { + return originalResponse, nil + } + + if valid := json.Valid(bodyBytes); !valid { + err = errors.New(string(bodyBytes)) + } + + return originalResponse, err +} + func makeWriteURL(loc *url.URL, db, rp, consistency string) (string, error) { params := url.Values{} params.Set("db", db) diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 1d030d36cd583..ba4dd2d81b12a 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -1,3 +1,4 @@ +//nolint package influxdb_test import ( @@ -5,7 +6,7 @@ import ( "compress/gzip" "context" "fmt" - "io/ioutil" + "io" "log" "net" "net/http" @@ -13,7 +14,6 @@ import ( "net/url" "os" "path" - "strings" "testing" "time" @@ -212,6 +212,26 @@ func TestHTTP_CreateDatabase(t *testing.T) { w.WriteHeader(http.StatusOK) }, }, + { + name: "invalid json response is handled", + config: influxdb.HTTPConfig{ + URL: u, + Database: `database`, + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`invalid response`)) + }, + errFunc: func(t *testing.T, err error) { + expected := &influxdb.APIError{ + StatusCode: 400, + Title: "400 Bad Request", + Description: "An error response was received while attempting to create the following database: database.
Error: invalid response", + } + + require.Equal(t, expected, err) + }, + }, } for _, tt := range tests { @@ -264,7 +284,7 @@ func TestHTTP_Write(t *testing.T) { }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -366,7 +386,7 @@ func TestHTTP_Write(t *testing.T) { }, }, { - name: "hinted handoff not empty no log no error", + name: "hinted handoff not empty no error", config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", @@ -376,8 +396,8 @@ func TestHTTP_Write(t *testing.T) { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(`{"error": "write failed: hinted handoff queue not empty"}`)) }, - logFunc: func(t *testing.T, str string) { - require.False(t, strings.Contains(str, "hinted handoff queue not empty")) + errFunc: func(t *testing.T, err error) { + require.NoError(t, err) }, }, { @@ -470,7 +490,7 @@ func TestHTTP_Write(t *testing.T) { ctx := context.Background() - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -478,7 +498,6 @@ func TestHTTP_Write(t *testing.T) { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics := []telegraf.Metric{m} client, err := influxdb.NewHTTPClient(tt.config) @@ -521,7 +540,7 @@ func TestHTTP_WritePathPrefix(t *testing.T) { ctx := context.Background() - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -529,7 +548,6 @@ func TestHTTP_WritePathPrefix(t *testing.T) { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics := []telegraf.Metric{m} config := influxdb.HTTPConfig{ @@ -555,7 +573,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -575,7 +593,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { ctx := context.Background() - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -600,7 +618,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { } func TestHTTP_UnixSocket(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf-test") + tmpdir, err := os.MkdirTemp("", "telegraf-test") if err != nil { require.NoError(t, err) } @@ -682,7 +700,7 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["db"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -817,7 +835,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) @@ -899,7 +917,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) 
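The "invalid json response is handled" case above exercises the validateResponse helper, whose core idea is read-validate-replay: drain the body once, check it with json.Valid, and hand back a fresh reader over the same bytes so the normal decode path still works. A minimal sketch of that idea under the same assumptions (validateJSONBody is an illustrative name, not the plugin's API):

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

// validateJSONBody drains the response body, verifies it is valid JSON
// (an empty body counts as valid), and returns a replayable reader over
// the same bytes so the caller can still run a JSON decoder on it.
func validateJSONBody(body io.Reader) (io.Reader, error) {
	raw, err := io.ReadAll(body)
	if err != nil {
		return nil, err
	}
	replay := bytes.NewReader(raw)
	if len(raw) == 0 {
		return replay, nil
	}
	if !json.Valid(raw) {
		// Surface the raw payload so it can be quoted in an error description.
		return replay, errors.New(string(raw))
	}
	return replay, nil
}

func main() {
	if _, err := validateJSONBody(strings.NewReader("invalid response")); err != nil {
		fmt.Println("poorly formatted response:", err)
	}
	r, _ := validateJSONBody(strings.NewReader(`{"error": "database not found"}`))
	var decoded map[string]string
	_ = json.NewDecoder(r).Decode(&decoded) // the body can still be decoded after validation
	fmt.Println(decoded["error"])
}
```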
require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -930,7 +948,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) @@ -1128,10 +1146,66 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { err = output.Connect() require.NoError(t, err) + + // this write fails, but we're expecting it to drop the metrics and not retry, so no error. err = output.Write(metrics) - require.Error(t, err) + require.NoError(t, err) + + // expects write to succeed err = output.Write(metrics) require.NoError(t, err) require.True(t, handlers.Done(), "all handlers not called") } + +func TestDBNotFoundShouldDropMetricWhenSkipDatabaseCreateIsTrue(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + f := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + } + + ts.Config.Handler = http.HandlerFunc(f) + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + logger := &testutil.CaptureLogger{} + output := influxdb.InfluxDB{ + URL: u.String(), + Database: "telegraf", + DatabaseTag: "database", + SkipDatabaseCreation: true, + Log: logger, + CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { + return influxdb.NewHTTPClient(*config) + }, + } + + err = output.Connect() + require.NoError(t, err) + err = output.Write(metrics) + require.Contains(t, logger.LastError, "database not found") + require.NoError(t, err) + + err = output.Write(metrics) + require.Contains(t, logger.LastError, "database not found") + require.NoError(t, err) +} diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 68e8c93ac4aa5..1ea39a5e56505 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -1,3 +1,4 @@ +//nolint package influxdb import ( @@ -9,7 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -43,8 +44,8 @@ type InfluxDB struct { ExcludeRetentionPolicyTag bool `toml:"exclude_retention_policy_tag"` UserAgent string `toml:"user_agent"` WriteConsistency string `toml:"write_consistency"` - Timeout internal.Duration `toml:"timeout"` - UDPPayload internal.Size `toml:"udp_payload"` + Timeout config.Duration `toml:"timeout"` + UDPPayload config.Size `toml:"udp_payload"` HTTPProxy string `toml:"http_proxy"` HTTPHeaders map[string]string `toml:"http_headers"` ContentEncoding string `toml:"content_encoding"` @@ -131,7 +132,7 @@ var sampleConfig = ` ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or 
"identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -210,6 +211,7 @@ func (i *InfluxDB) SampleConfig() string { func (i *InfluxDB) Write(metrics []telegraf.Metric) error { ctx := context.Background() + allErrorsAreDatabaseNotFoundErrors := true var err error p := rand.Perm(len(i.clients)) for _, n := range p { @@ -219,27 +221,38 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return nil } + i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) + switch apiError := err.(type) { case *DatabaseNotFoundError: - if !i.SkipDatabaseCreation { - err := client.CreateDatabase(ctx, apiError.Database) - if err != nil { - i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", - client.URL(), apiError.Database) - } + if i.SkipDatabaseCreation { + continue + } + // retry control + // error so the write is retried + err := client.CreateDatabase(ctx, apiError.Database) + if err != nil { + i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", + client.URL(), apiError.Database) + } else { + return errors.New("database created; retry write") } + default: + allErrorsAreDatabaseNotFoundErrors = false } - - i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) } + if allErrorsAreDatabaseNotFoundErrors { + // return nil because we should not be retrying this + return nil + } return errors.New("could not write any address") } func (i *InfluxDB) udpClient(url *url.URL) (Client, error) { config := &UDPConfig{ URL: url, - MaxPayloadSize: int(i.UDPPayload.Size), + MaxPayloadSize: int(i.UDPPayload), Serializer: i.newSerializer(), Log: i.Log, } @@ -260,7 +273,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) config := &HTTPConfig{ URL: url, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), TLSConfig: tlsConfig, UserAgent: i.UserAgent, Username: i.Username, @@ -308,13 +321,14 @@ func (i *InfluxDB) newSerializer() *influx.Serializer { func init() { outputs.Add("influxdb", func() telegraf.Output { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), CreateHTTPClientF: func(config *HTTPConfig) (Client, error) { return NewHTTPClient(*config) }, CreateUDPClientF: func(config *UDPConfig) (Client, error) { return NewUDPClient(*config) }, + ContentEncoding: "gzip", } }) } diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 476211069af7b..d0f50bbfed94f 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs/influxdb" @@ -95,7 +95,7 @@ func TestConnectUDPConfig(t *testing.T) { output := influxdb.InfluxDB{ URLs: []string{"udp://localhost:8089"}, - UDPPayload: internal.Size{Size: 42}, + UDPPayload: config.Size(42), CreateUDPClientF: func(config *influxdb.UDPConfig) (influxdb.Client, error) { actual = config @@ -120,7 +120,7 @@ func TestConnectHTTPConfig(t *testing.T) { Database: "telegraf", RetentionPolicy: "default", WriteConsistency: "any", - Timeout: 
internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), Username: "guy", Password: "smiley", UserAgent: "telegraf", @@ -153,7 +153,7 @@ func TestConnectHTTPConfig(t *testing.T) { require.Equal(t, output.URLs[0], actual.URL.String()) require.Equal(t, output.UserAgent, actual.UserAgent) - require.Equal(t, output.Timeout.Duration, actual.Timeout) + require.Equal(t, time.Duration(output.Timeout), actual.Timeout) require.Equal(t, output.Username, actual.Username) require.Equal(t, output.Password, actual.Password) require.Equal(t, output.HTTPProxy, actual.Proxy.String()) @@ -200,7 +200,7 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { err := output.Connect() require.NoError(t, err) - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -208,7 +208,6 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics := []telegraf.Metric{m} err = output.Write(metrics) diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 0add3c6c39de6..62848417b124c 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -115,7 +115,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error return nil } -func (c *udpClient) CreateDatabase(ctx context.Context, database string) error { +func (c *udpClient) CreateDatabase(_ context.Context, _ string) error { return nil } @@ -134,7 +134,6 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { if i := bytes.IndexByte(data, '\n'); i >= 0 { // We have a full newline-terminated line. return i + 1, data[0 : i+1], nil - } return 0, nil, nil } diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 2e60c586c7a03..25e03f72173ee 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -23,7 +23,7 @@ var ( ) func getMetric() telegraf.Metric { - metric, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -31,10 +31,8 @@ func getMetric() telegraf.Metric { }, time.Unix(0, 0), ) - if err != nil { - panic(err) - } - return metric + + return m } func getURL() *url.URL { @@ -62,7 +60,7 @@ type MockDialer struct { DialContextF func(network, address string) (influxdb.Conn, error) } -func (d *MockDialer) DialContext(ctx context.Context, network string, address string) (influxdb.Conn, error) { +func (d *MockDialer) DialContext(_ context.Context, network string, address string) (influxdb.Conn, error) { return d.DialContextF(network, address) } @@ -202,7 +200,7 @@ func TestUDP_ErrorLogging(t *testing.T) { }, metrics: []telegraf.Metric{ func() telegraf.Metric { - metric, _ := metric.New( + m := metric.New( "cpu", map[string]string{ "host": "example.org", @@ -210,7 +208,7 @@ func TestUDP_ErrorLogging(t *testing.T) { map[string]interface{}{}, time.Unix(0, 0), ) - return metric + return m }(), }, logContains: `could not serialize metric: "cpu,host=example.org": no serializable fields`, diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 2a32c5f4c60ea..c076580255740 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,8 +7,8 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" + "math" "net" "net/http" "net/url" @@ -35,9 +35,9 @@ func (e APIError) Error() string { } const ( - defaultRequestTimeout = time.Second * 5 - 
defaultMaxWait = 10 // seconds - defaultDatabase = "telegraf" + defaultRequestTimeout = time.Second * 5 + defaultMaxWaitSeconds = 60 + defaultMaxWaitRetryAfterSeconds = 10 * 60 ) type HTTPConfig struct { @@ -70,6 +70,7 @@ type httpClient struct { serializer *influx.Serializer url *url.URL retryTime time.Time + retryCount int } func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { @@ -169,7 +170,7 @@ func (g genericRespError) Error() string { func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { if c.retryTime.After(time.Now()) { - return errors.New("Retry time has not elapsed") + return errors.New("retry time has not elapsed") } batches := make(map[string][]telegraf.Metric) @@ -233,7 +234,18 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } defer resp.Body.Close() - if resp.StatusCode == http.StatusNoContent { + switch resp.StatusCode { + case + // this is the expected response: + http.StatusNoContent, + // but if we get these we should still accept it as delivered: + http.StatusOK, + http.StatusCreated, + http.StatusAccepted, + http.StatusPartialContent, + http.StatusMultiStatus, + http.StatusAlreadyReported: + c.retryCount = 0 return nil } @@ -245,33 +257,37 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } switch resp.StatusCode { - case http.StatusBadRequest, http.StatusRequestEntityTooLarge: - log.Printf("E! [outputs.influxdb_v2] Failed to write metric: %s\n", desc) + case + // request was malformed: + http.StatusBadRequest, + // request was too large: + http.StatusRequestEntityTooLarge, + // request was received but server refused to process it due to a semantic problem with the request. + // for example, submitting metrics outside the retention period. + // Clients should *not* repeat the request and the metrics should be dropped. + http.StatusUnprocessableEntity, + http.StatusNotAcceptable: + log.Printf("E! [outputs.influxdb_v2] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) return nil case http.StatusUnauthorized, http.StatusForbidden: - return fmt.Errorf("failed to write metric: %s", desc) - case http.StatusTooManyRequests: - retryAfter := resp.Header.Get("Retry-After") - retry, err := strconv.Atoi(retryAfter) - if err != nil { - return errors.New("rate limit exceeded") - } - if retry > defaultMaxWait { - retry = defaultMaxWait - } - c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) - return fmt.Errorf("waiting %ds for server before sending metric again", retry) - case http.StatusServiceUnavailable: - retryAfter := resp.Header.Get("Retry-After") - retry, err := strconv.Atoi(retryAfter) - if err != nil { - return errors.New("server responded: service unavailable") - } - if retry > defaultMaxWait { - retry = defaultMaxWait - } - c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) - return fmt.Errorf("waiting %ds for server before sending metric again", retry) + return fmt.Errorf("failed to write metric (%s): %s", resp.Status, desc) + case http.StatusTooManyRequests, + http.StatusServiceUnavailable, + http.StatusBadGateway, + http.StatusGatewayTimeout: + // ^ these handle the cases where the server is likely overloaded, and may not be able to say so. + c.retryCount++ + retryDuration := c.getRetryDuration(resp.Header) + c.retryTime = time.Now().Add(retryDuration) + log.Printf("W! [outputs.influxdb_v2] Failed to write; will retry in %s. 
(%s)\n", retryDuration, resp.Status) + return fmt.Errorf("waiting %s for server before sending metric again", retryDuration) + } + + // if it's any other 4xx code, the client should not retry as it's the client's mistake. + // retrying will not make the request magically work. + if len(resp.Status) > 0 && resp.Status[0] == '4' { + log.Printf("E! [outputs.influxdb_v2] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) + return nil } // This is only until platform spec is fully implemented. As of the @@ -287,6 +303,31 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } } +// getRetryDuration takes the longer of the Retry-After header and our own back-off calculation +func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { + // basic exponential backoff (x^2)/40 (denominator to widen the slope) + // at 40 denominator, it'll take 49 retries to hit the max defaultMaxWaitSeconds of 60s + backoff := math.Pow(float64(c.retryCount), 2) / 40 + backoff = math.Min(backoff, defaultMaxWaitSeconds) + + // get any value from the header, if available + retryAfterHeader := float64(0) + retryAfterHeaderString := headers.Get("Retry-After") + if len(retryAfterHeaderString) > 0 { + var err error + retryAfterHeader, err = strconv.ParseFloat(retryAfterHeaderString, 64) + if err != nil { + // the header had a value we could not parse; assume a minimum of 10 seconds + retryAfterHeader = 10 + } + // protect against excessively large retry-after + retryAfterHeader = math.Min(retryAfterHeader, defaultMaxWaitRetryAfterSeconds) + } + // take the highest value of backoff and retry-after. + retry := math.Max(backoff, retryAfterHeader) + return time.Duration(retry*1000) * time.Millisecond +} + func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error @@ -319,7 +360,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index e9685da129aa7..10e2a4e133eeb 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -1,8 +1,11 @@ package influxdb_v2 import ( + "fmt" + "net/http" "net/url" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -45,3 +48,53 @@ func TestMakeWriteURL(t *testing.T) { } } } + +func TestExponentialBackoffCalculation(t *testing.T) { + c := &httpClient{} + tests := []struct { + retryCount int + expected time.Duration + }{ + {retryCount: 0, expected: 0}, + {retryCount: 1, expected: 25 * time.Millisecond}, + {retryCount: 5, expected: 625 * time.Millisecond}, + {retryCount: 10, expected: 2500 * time.Millisecond}, + {retryCount: 30, expected: 22500 * time.Millisecond}, + {retryCount: 40, expected: 40 * time.Second}, + {retryCount: 50, expected: 60 * time.Second}, // max hit + {retryCount: 100, expected: 60 * time.Second}, + {retryCount: 1000, expected: 60 * time.Second}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_retries", test.retryCount), func(t *testing.T) { + c.retryCount = test.retryCount + require.EqualValues(t, test.expected, c.getRetryDuration(http.Header{})) + }) + } +} + +func TestExponentialBackoffCalculationWithRetryAfter(t *testing.T) { + c := &httpClient{} + tests := []struct { + retryCount int + retryAfter string +
expected time.Duration + }{ + {retryCount: 0, retryAfter: "0", expected: 0}, + {retryCount: 0, retryAfter: "10", expected: 10 * time.Second}, + {retryCount: 0, retryAfter: "60", expected: 60 * time.Second}, + {retryCount: 0, retryAfter: "600", expected: 600 * time.Second}, + {retryCount: 0, retryAfter: "601", expected: 600 * time.Second}, // max hit + {retryCount: 40, retryAfter: "39", expected: 40 * time.Second}, // retryCount wins + {retryCount: 40, retryAfter: "41", expected: 41 * time.Second}, // retryAfter wins + {retryCount: 100, retryAfter: "100", expected: 100 * time.Second}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_retries", test.retryCount), func(t *testing.T) { + c.retryCount = test.retryCount + hdr := http.Header{} + hdr.Add("Retry-After", test.retryAfter) + require.EqualValues(t, test.expected, c.getRetryDuration(hdr)) + }) + } +} diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 23c3ff05e17b6..0637cd8060bd0 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -2,7 +2,7 @@ package influxdb_v2_test import ( "context" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -63,7 +63,7 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["bucket"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index 6076297f8c83a..e188ddbae94d1 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -4,13 +4,12 @@ import ( "context" "errors" "fmt" - "log" "math/rand" "net/url" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -88,7 +87,7 @@ type InfluxDB struct { Bucket string `toml:"bucket"` BucketTag string `toml:"bucket_tag"` ExcludeBucketTag bool `toml:"exclude_bucket_tag"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` HTTPHeaders map[string]string `toml:"http_headers"` HTTPProxy string `toml:"http_proxy"` UserAgent string `toml:"user_agent"` @@ -96,12 +95,12 @@ type InfluxDB struct { UintSupport bool `toml:"influx_uint_support"` tls.ClientConfig + Log telegraf.Logger `toml:"-"` + clients []Client } func (i *InfluxDB) Connect() error { - ctx := context.Background() - if len(i.URLs) == 0 { i.URLs = append(i.URLs, defaultURL) } @@ -122,7 +121,7 @@ func (i *InfluxDB) Connect() error { switch parts.Scheme { case "http", "https", "unix": - c, err := i.getHTTPClient(ctx, parts, proxy) + c, err := i.getHTTPClient(parts, proxy) if err != nil { return err } @@ -165,13 +164,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return nil } - log.Printf("E! 
[outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err) + i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) } return err } -func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) { +func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) { tlsConfig, err := i.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -184,7 +183,7 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U Bucket: i.Bucket, BucketTag: i.BucketTag, ExcludeBucketTag: i.ExcludeBucketTag, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), Headers: i.HTTPHeaders, Proxy: proxy, UserAgent: i.UserAgent, @@ -213,7 +212,7 @@ func (i *InfluxDB) newSerializer() *influx.Serializer { func init() { outputs.Add("influxdb_v2", func() telegraf.Output { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), ContentEncoding: "gzip", } }) diff --git a/plugins/outputs/influxdb_v2/influxdb_test.go b/plugins/outputs/influxdb_v2/influxdb_test.go index 90a3823915a5b..b16fd944d28db 100644 --- a/plugins/outputs/influxdb_v2/influxdb_test.go +++ b/plugins/outputs/influxdb_v2/influxdb_test.go @@ -94,7 +94,7 @@ func TestConnect(t *testing.T) { } } -func TestUnused(t *testing.T) { +func TestUnused(_ *testing.T) { thing := influxdb.InfluxDB{} thing.Close() thing.Description() diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index e5decbf7f065f..f7158f16fc4c3 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -4,13 +4,13 @@ import ( "bytes" "fmt" "io" - "log" "net" "regexp" "strings" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/graphite" @@ -22,14 +22,16 @@ var ( ) type Instrumental struct { - Host string - ApiToken string - Prefix string - DataFormat string - Template string - Templates []string - Timeout internal.Duration - Debug bool + Host string `toml:"host"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + DataFormat string `toml:"data_format"` + Template string `toml:"template"` + Templates []string `toml:"templates"` + Timeout config.Duration `toml:"timeout"` + Debug bool `toml:"debug"` + + Log telegraf.Logger `toml:"-"` conn net.Conn } @@ -56,7 +58,7 @@ var sampleConfig = ` ` func (i *Instrumental) Connect() error { - connection, err := net.DialTimeout("tcp", i.Host+":8000", i.Timeout.Duration) + connection, err := net.DialTimeout("tcp", i.Host+":8000", time.Duration(i.Timeout)) if err != nil { i.conn = nil @@ -82,11 +84,11 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { if i.conn == nil { err := i.Connect() if err != nil { - return fmt.Errorf("FAILED to (re)connect to Instrumental. Error: %s\n", err) + return fmt.Errorf("failed to (re)connect to Instrumental. Error: %s", err) } } - s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, ".", i.Templates) + s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, "strict", ".", i.Templates) if err != nil { return err } @@ -111,7 +113,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { buf, err := s.Serialize(m) if err != nil { - log.Printf("D! 
[outputs.instrumental] Could not serialize metric: %v", err) + i.Log.Debugf("Could not serialize metric: %v", err) continue } @@ -139,10 +141,10 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { time := splitStat[2] // replace invalid components of metric name with underscore - clean_metric := MetricNameReplacer.ReplaceAllString(name, "_") + cleanMetric := MetricNameReplacer.ReplaceAllString(name, "_") if !ValueIncludesBadChar.MatchString(value) { - points = append(points, fmt.Sprintf("%s %s %s %s", metricType, clean_metric, value, time)) + points = append(points, fmt.Sprintf("%s %s %s %s", metricType, cleanMetric, value, time)) } } } @@ -175,7 +177,7 @@ func (i *Instrumental) SampleConfig() string { } func (i *Instrumental) authenticate(conn net.Conn) error { - _, err := fmt.Fprintf(conn, HandshakeFormat, i.ApiToken) + _, err := fmt.Fprintf(conn, HandshakeFormat, i.APIToken) if err != nil { return err } @@ -187,7 +189,7 @@ func (i *Instrumental) authenticate(conn net.Conn) error { } if string(responses)[:6] != "ok\nok\n" { - return fmt.Errorf("Authentication failed: %s", responses) + return fmt.Errorf("authentication failed: %s", responses) } i.conn = conn @@ -198,7 +200,7 @@ func init() { outputs.Add("instrumental", func() telegraf.Output { return &Instrumental{ Host: DefaultHost, - Template: graphite.DEFAULT_TEMPLATE, + Template: graphite.DefaultTemplate, } }) } diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go index 0d3ce904008e6..f72b9e90f0806 100644 --- a/plugins/outputs/instrumental/instrumental_test.go +++ b/plugins/outputs/instrumental/instrumental_test.go @@ -20,18 +20,18 @@ func TestWrite(t *testing.T) { i := Instrumental{ Host: "127.0.0.1", - ApiToken: "abc123token", + APIToken: "abc123token", Prefix: "my.prefix", } // Default to gauge - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "metric_type": "set"}, map[string]interface{}{"value": float64(3.14)}, @@ -42,27 +42,27 @@ func TestWrite(t *testing.T) { i.Write(metrics) // Counter and Histogram are increments - m3, _ := metric.New( + m3 := metric.New( "my_histogram", map[string]string{"host": "192.168.0.1", "metric_type": "histogram"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) // We will modify metric names that won't be accepted by Instrumental - m4, _ := metric.New( + m4 := metric.New( "bad_metric_name", map[string]string{"host": "192.168.0.1:8888::123", "metric_type": "counter"}, map[string]interface{}{"value": 1}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) // We will drop metric values that won't be accepted by Instrumental - m5, _ := metric.New( + m5 := metric.New( "bad_values", map[string]string{"host": "192.168.0.1", "metric_type": "counter"}, map[string]interface{}{"value": "\" 3:30\""}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m6, _ := metric.New( + m6 := metric.New( "my_counter", map[string]string{"host": "192.168.0.1", "metric_type": "counter"}, map[string]interface{}{"value": float64(3.14)}, diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index d1cc9f0cbb18b..e76522018fb4a 100644 --- a/plugins/outputs/kafka/README.md +++ 
b/plugins/outputs/kafka/README.md @@ -72,13 +72,18 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## routing_key = "telegraf" # routing_key = "" - ## CompressionCodec represents the various compression codecs recognized by + ## Compression codec represents the various compression codecs recognized by ## Kafka in messages. - ## 0 : No compression - ## 1 : Gzip compression - ## 2 : Snappy compression - ## 3 : LZ4 compression - # compression_codec = 0 + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + + ## Idempotent Writes + ## If enabled, exactly one copy of each message is written. + # idempotent_writes = false ## RequiredAcks is used in Produce Requests to tell the broker how many ## replica acknowledgements it must see before responding @@ -111,6 +116,23 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 26a0c5bdb9a65..d30c730cfac18 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -1,7 +1,6 @@ package kafka import ( - "crypto/tls" "fmt" "log" "strings" @@ -11,7 +10,6 @@ import ( "github.com/gofrs/uuid" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/kafka" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -24,53 +22,39 @@ var ValidTopicSuffixMethods = []string{ var zeroTime = time.Unix(0, 0) -type ( - Kafka struct { - Brokers []string `toml:"brokers"` - Topic string `toml:"topic"` - TopicTag string `toml:"topic_tag"` - ExcludeTopicTag bool `toml:"exclude_topic_tag"` - ClientID string `toml:"client_id"` - TopicSuffix TopicSuffix `toml:"topic_suffix"` - RoutingTag string `toml:"routing_tag"` - RoutingKey string `toml:"routing_key"` - CompressionCodec int `toml:"compression_codec"` - RequiredAcks int `toml:"required_acks"` - MaxRetry int `toml:"max_retry"` - MaxMessageBytes int `toml:"max_message_bytes"` - - Version string `toml:"version"` - - // Legacy TLS config options - // TLS client certificate - Certificate string - // TLS client key - Key string - // TLS certificate authority - CA string - - EnableTLS *bool `toml:"enable_tls"` - tlsint.ClientConfig - - SASLUsername string `toml:"sasl_username"` - SASLPassword string `toml:"sasl_password"` - SASLVersion *int `toml:"sasl_version"` - - Log telegraf.Logger `toml:"-"` - - tlsConfig tls.Config - - producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) - producer sarama.SyncProducer - - serializer serializers.Serializer - } - TopicSuffix struct { - Method string `toml:"method"` - Keys []string `toml:"keys"` - Separator string 
`toml:"separator"` - } -) +type Kafka struct { + Brokers []string `toml:"brokers"` + Topic string `toml:"topic"` + TopicTag string `toml:"topic_tag"` + ExcludeTopicTag bool `toml:"exclude_topic_tag"` + TopicSuffix TopicSuffix `toml:"topic_suffix"` + RoutingTag string `toml:"routing_tag"` + RoutingKey string `toml:"routing_key"` + + // Legacy TLS config options + // TLS client certificate + Certificate string + // TLS client key + Key string + // TLS certificate authority + CA string + + kafka.WriteConfig + + Log telegraf.Logger `toml:"-"` + + saramaConfig *sarama.Config + producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) + producer sarama.SyncProducer + + serializer serializers.Serializer +} + +type TopicSuffix struct { + Method string `toml:"method"` + Keys []string `toml:"keys"` + Separator string `toml:"separator"` +} // DebugLogger logs messages from sarama at the debug level. type DebugLogger struct { @@ -80,7 +64,6 @@ func (*DebugLogger) Print(v ...interface{}) { args := make([]interface{}, 0, len(v)+1) args = append(append(args, "D! [sarama] "), v...) log.Print(args...) - } func (*DebugLogger) Printf(format string, v ...interface{}) { @@ -161,14 +144,19 @@ var sampleConfig = ` ## routing_key = "telegraf" # routing_key = "" - ## CompressionCodec represents the various compression codecs recognized by + ## Compression codec represents the various compression codecs recognized by ## Kafka in messages. - ## 0 : No compression - ## 1 : Gzip compression - ## 2 : Snappy compression - ## 3 : LZ4 compression + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD # compression_codec = 0 + ## Idempotent Writes + ## If enabled, exactly one copy of each message is written. + # idempotent_writes = false + ## RequiredAcks is used in Produce Requests to tell the broker how many ## replica acknowledgements it must see before responding ## 0 : the producer never waits for an acknowledgement from the broker. @@ -194,7 +182,6 @@ var sampleConfig = ` # max_message_bytes = 1000000 ## Optional TLS Config - # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -205,6 +192,23 @@ var sampleConfig = ` # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. 
# sasl_version = 1 @@ -264,35 +268,18 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func (k *Kafka) Connect() error { +func (k *Kafka) Init() error { err := ValidateTopicSuffixMethod(k.TopicSuffix.Method) if err != nil { return err } config := sarama.NewConfig() - if k.Version != "" { - version, err := sarama.ParseKafkaVersion(k.Version) - if err != nil { - return err - } - config.Version = version - } - - if k.ClientID != "" { - config.ClientID = k.ClientID - } else { - config.ClientID = "Telegraf" + if err := k.SetConfig(config); err != nil { + return err } - config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) - config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) - config.Producer.Retry.Max = k.MaxRetry - config.Producer.Return.Successes = true - - if k.MaxMessageBytes > 0 { - config.Producer.MaxMessageBytes = k.MaxMessageBytes - } + k.saramaConfig = config // Legacy support ssl config if k.Certificate != "" { @@ -301,39 +288,11 @@ func (k *Kafka) Connect() error { k.TLSKey = k.Key } - if k.EnableTLS != nil && *k.EnableTLS { - config.Net.TLS.Enable = true - } - - tlsConfig, err := k.ClientConfig.TLSConfig() - if err != nil { - return err - } - - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - - // To maintain backwards compatibility, if the enable_tls option is not - // set TLS is enabled if a non-default TLS config is used. - if k.EnableTLS == nil { - k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") - config.Net.TLS.Enable = true - } - } - - if k.SASLUsername != "" && k.SASLPassword != "" { - config.Net.SASL.User = k.SASLUsername - config.Net.SASL.Password = k.SASLPassword - config.Net.SASL.Enable = true - - version, err := kafka.SASLVersion(config.Version, k.SASLVersion) - if err != nil { - return err - } - config.Net.SASL.Version = version - } + return nil +} - producer, err := k.producerFunc(k.Brokers, config) +func (k *Kafka) Connect() error { + producer, err := k.producerFunc(k.Brokers, k.saramaConfig) if err != nil { return err } @@ -430,8 +389,10 @@ func init() { sarama.Logger = &DebugLogger{} outputs.Add("kafka", func() telegraf.Output { return &Kafka{ - MaxRetry: 3, - RequiredAcks: -1, + WriteConfig: kafka.WriteConfig{ + MaxRetry: 3, + RequiredAcks: -1, + }, producerFunc: sarama.NewSyncProducer, } }) diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 070eea3f91d9c..0edaed31f41f3 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -17,7 +17,7 @@ type topicSuffixTestpair struct { expectedTopic string } -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -25,13 +25,16 @@ func TestConnectAndWrite(t *testing.T) { brokers := []string{testutil.GetLocalHost() + ":9092"} s, _ := serializers.NewInfluxSerializer() k := &Kafka{ - Brokers: brokers, - Topic: "Test", - serializer: s, + Brokers: brokers, + Topic: "Test", + serializer: s, + producerFunc: sarama.NewSyncProducer, } // Verify that we can connect to the Kafka broker - err := k.Connect() + err := k.Init() + require.NoError(t, err) + err = k.Connect() require.NoError(t, err) // Verify that we can successfully write data to the kafka broker @@ -40,7 +43,7 @@ func TestConnectAndWrite(t *testing.T) { k.Close() } -func TestTopicSuffixes(t *testing.T) { +func TestTopicSuffixesIntegration(t *testing.T) 
{ if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -87,7 +90,7 @@ func TestTopicSuffixes(t *testing.T) { } } -func TestValidateTopicSuffixMethod(t *testing.T) { +func TestValidateTopicSuffixMethodIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -114,7 +117,7 @@ func TestRoutingKey(t *testing.T) { RoutingKey: "static", }, metric: func() telegraf.Metric { - m, _ := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -134,7 +137,7 @@ func TestRoutingKey(t *testing.T) { RoutingKey: "random", }, metric: func() telegraf.Metric { - m, _ := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -176,7 +179,7 @@ func (p *MockProducer) Close() error { return nil } -func NewMockProducer(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) { +func NewMockProducer(_ []string, _ *sarama.Config) (sarama.SyncProducer, error) { return &MockProducer{}, nil } diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 1931dacb91f89..2d909090b69ad 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -13,12 +13,16 @@ maybe useful for users to review Amazons official documentation which is availab This plugin uses a credential chain for Authentication with the Kinesis API endpoint. In the following order the plugin will attempt to authenticate. -1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) -2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes -3. Shared profile from `profile` attribute -4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) -5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) -6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +1. Web identity provider credentials via STS if `role_arn` and `web_identity_token_file` are specified +2. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) +3. Explicit credentials from `access_key`, `secret_key`, and `token` attributes +4. Shared profile from `profile` attribute +5. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +6. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) +7. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) + +If you are using credentials from a web identity provider, you can specify the session name using `role_session_name`. If +left empty, the current timestamp will be used. 
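For illustration, the credential chain above matches the resolution order of the AWS SDK for Go v2, which this PR migrates the plugin to. A minimal standalone sketch of the same resolution, assuming aws-sdk-go-v2; the region and role ARN below are placeholder assumptions, not values from this patch:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// The default loader walks the lower-priority sources in the list above:
	// environment variables, the shared credentials file/profile, then the
	// EC2 instance profile.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("ap-southeast-2"))
	if err != nil {
		log.Fatal(err)
	}

	// When role_arn is configured (rule 2), the base credentials are wrapped
	// in an STS AssumeRole provider. The ARN here is a placeholder.
	roleARN := "arn:aws:iam::123456789012:role/telegraf-kinesis"
	cfg.Credentials = aws.NewCredentialsCache(
		stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), roleARN),
	)

	// A config resolved this way is what kinesis.NewFromConfig receives in
	// the plugin's Connect().
	_ = kinesis.NewFromConfig(cfg)
}
```

In this sketch the assume-role wrapping is applied unconditionally; the plugin only does so when `role_arn` is set, and rules 5-7 come for free from `config.LoadDefaultConfig`.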
## Config diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 88620fa70d3f9..56858340887f5 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -1,11 +1,12 @@ package kinesis import ( - "log" + "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -13,25 +14,22 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) +// Limit set by AWS (https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html) +const maxRecordsPerRequest uint32 = 500 + type ( KinesisOutput struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` - StreamName string `toml:"streamname"` PartitionKey string `toml:"partitionkey"` RandomPartitionKey bool `toml:"use_random_partitionkey"` Partition *Partition `toml:"partition"` Debug bool `toml:"debug"` - svc *kinesis.Kinesis + Log telegraf.Logger `toml:"-"` serializer serializers.Serializer + svc kinesisClient + + internalaws.CredentialConfig } Partition struct { @@ -41,22 +39,29 @@ type ( } ) +type kinesisClient interface { + PutRecords(context.Context, *kinesis.PutRecordsInput, ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) +} + var sampleConfig = ` ## Amazon REGION of kinesis endpoint. region = "ap-southeast-2" ## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile #access_key = "" #secret_key = "" #token = "" #role_arn = "" + #web_identity_token_file = "" + #role_session_name = "" #profile = "" #shared_credential_file = "" @@ -117,29 +122,23 @@ func (k *KinesisOutput) Description() string { func (k *KinesisOutput) Connect() error { if k.Partition == nil { - log.Print("E! kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") + k.Log.Error("Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") } // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using // environment variables, and then Shared Credentials. if k.Debug { - log.Printf("I! 
kinesis: Establishing a connection to Kinesis in %s", k.Region) + k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region) } - credentialConfig := &internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, - EndpointURL: k.EndpointURL, + cfg, err := k.CredentialConfig.Credentials() + if err != nil { + return err } - configProvider := credentialConfig.Credentials() - svc := kinesis.New(configProvider) - _, err := svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + svc := kinesis.NewFromConfig(cfg) + + _, err = svc.DescribeStreamSummary(context.Background(), &kinesis.DescribeStreamSummaryInput{ StreamName: aws.String(k.StreamName), }) k.svc = svc @@ -154,26 +153,28 @@ func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration { +func (k *KinesisOutput) writeKinesis(r []types.PutRecordsRequestEntry) time.Duration { start := time.Now() payload := &kinesis.PutRecordsInput{ Records: r, StreamName: aws.String(k.StreamName), } + resp, err := k.svc.PutRecords(context.Background(), payload) + if err != nil { + k.Log.Errorf("Unable to write to Kinesis : %s", err.Error()) + return time.Since(start) + } + if k.Debug { - resp, err := k.svc.PutRecords(payload) - if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) - } - log.Printf("I! Wrote: '%+v'", resp) + k.Log.Infof("Wrote: '%+v'", resp) + } - } else { - _, err := k.svc.PutRecords(payload) - if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) - } + failed := *resp.FailedRecordCount + if failed > 0 { + k.Log.Errorf("Unable to write %+v of %+v record(s) to Kinesis", failed, len(r)) } + return time.Since(start) } @@ -199,7 +200,7 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { // Default partition name if default is not set return "telegraf" default: - log.Printf("E! kinesis : You have configured a Partition method of '%s' which is not supported", k.Partition.Method) + k.Log.Errorf("You have configured a Partition method of '%s' which is not supported", k.Partition.Method) } } if k.RandomPartitionKey { @@ -219,38 +220,36 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { return nil } - r := []*kinesis.PutRecordsRequestEntry{} + r := []types.PutRecordsRequestEntry{} for _, metric := range metrics { sz++ values, err := k.serializer.Serialize(metric) if err != nil { - log.Printf("D! [outputs.kinesis] Could not serialize metric: %v", err) + k.Log.Debugf("Could not serialize metric: %v", err) continue } partitionKey := k.getPartitionKey(metric) - d := kinesis.PutRecordsRequestEntry{ + d := types.PutRecordsRequestEntry{ Data: values, PartitionKey: aws.String(partitionKey), } - r = append(r, &d) + r = append(r, d) - if sz == 500 { - // Max Messages Per PutRecordRequest is 500 - elapsed := writekinesis(k, r) - log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) + if sz == maxRecordsPerRequest { + elapsed := k.writeKinesis(r) + k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 r = nil } - } if sz > 0 { - elapsed := writekinesis(k, r) - log.Printf("D! 
Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) + elapsed := k.writeKinesis(r) + k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) } return nil diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 9d4f6729be53c..89724ef1805d2 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -1,19 +1,34 @@ package kinesis import ( + "context" + "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestPartitionKey(t *testing.T) { +const testPartitionKey = "partitionKey" +const testShardID = "shardId-000000000003" +const testSequenceNumber = "49543463076570308322303623326179887152428262250726293588" +const testStreamName = "streamName" +const zero int64 = 0 +func TestPartitionKey(t *testing.T) { assert := assert.New(t) testPoint := testutil.TestMetric(1) k := KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "static", Key: "-", @@ -22,6 +37,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "tag", Key: "tag1", @@ -30,6 +46,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal(testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "tag", Key: "doesnotexist", @@ -39,6 +56,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "tag", Key: "doesnotexist", @@ -47,6 +65,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("telegraf", k.getPartitionKey(testPoint), "PartitionKey should be telegraf") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "not supported", }, @@ -54,6 +73,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("", k.getPartitionKey(testPoint), "PartitionKey should be value of ''") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "measurement", }, @@ -61,6 +81,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal(testPoint.Name(), k.getPartitionKey(testPoint), "PartitionKey should be value of measurement name") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "random", }, @@ -71,11 +92,13 @@ func TestPartitionKey(t *testing.T) { assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4") k = KinesisOutput{ + Log: testutil.Logger{}, PartitionKey: "-", } assert.Equal("-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") k = KinesisOutput{ + Log: testutil.Logger{}, RandomPartitionKey: true, } partitionKey = k.getPartitionKey(testPoint) @@ -83,3 +106,520 @@ func TestPartitionKey(t *testing.T) { assert.Nil(err, "Issue parsing UUID") assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4") } + +func TestWriteKinesis_WhenSuccess(t *testing.T) { + assert := assert.New(t) + + records := []types.PutRecordsRequestEntry{ + { + 
PartitionKey: aws.String(testPartitionKey), + Data: []byte{0x65}, + }, + } + + svc := &mockKinesisPutRecords{} + svc.SetupResponse( + 0, + []types.PutRecordsResultEntry{ + { + SequenceNumber: aws.String(testSequenceNumber), + ShardId: aws.String(testShardID), + }, + }, + ) + + k := KinesisOutput{ + Log: testutil.Logger{}, + StreamName: testStreamName, + svc: svc, + } + + elapsed := k.writeKinesis(records) + assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: records, + }, + }) +} + +func TestWriteKinesis_WhenRecordErrors(t *testing.T) { + assert := assert.New(t) + + records := []types.PutRecordsRequestEntry{ + { + PartitionKey: aws.String(testPartitionKey), + Data: []byte{0x66}, + }, + } + + svc := &mockKinesisPutRecords{} + svc.SetupResponse( + 1, + []types.PutRecordsResultEntry{ + { + ErrorCode: aws.String("InternalFailure"), + ErrorMessage: aws.String("Internal Service Failure"), + }, + }, + ) + + k := KinesisOutput{ + Log: testutil.Logger{}, + StreamName: testStreamName, + svc: svc, + } + + elapsed := k.writeKinesis(records) + assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: records, + }, + }) +} + +func TestWriteKinesis_WhenServiceError(t *testing.T) { + assert := assert.New(t) + + records := []types.PutRecordsRequestEntry{ + { + PartitionKey: aws.String(testPartitionKey), + Data: []byte{}, + }, + } + + svc := &mockKinesisPutRecords{} + svc.SetupErrorResponse( + &types.InvalidArgumentException{Message: aws.String("Invalid record")}, + ) + + k := KinesisOutput{ + Log: testutil.Logger{}, + StreamName: testStreamName, + svc: svc, + } + + elapsed := k.writeKinesis(records) + assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: records, + }, + }) +} + +func TestWrite_NoMetrics(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + svc := &mockKinesisPutRecords{} + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: "partitionKey", + }, + StreamName: "stream", + serializer: serializer, + svc: svc, + } + + err := k.Write([]telegraf.Metric{}) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{}) +} + +func TestWrite_SingleMetric(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(1, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: testPartitionKey, + }, + StreamName: testStreamName, + serializer: serializer, + svc: svc, + } + + metric, metricData := createTestMetric(t, "metric1", serializer) + err := k.Write([]telegraf.Metric{metric}) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: []types.PutRecordsRequestEntry{ + { + PartitionKey: aws.String(testPartitionKey), + Data: metricData, + }, + }, + }, + }) +} + +func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(3, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: 
"static", + Key: testPartitionKey, + }, + StreamName: testStreamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, 3, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: createPutRecordsRequestEntries( + metricsData, + ), + }, + }) +} + +func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: testPartitionKey, + }, + StreamName: testStreamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: createPutRecordsRequestEntries( + metricsData, + ), + }, + }) +} + +func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + svc.SetupGenericResponse(1, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: testPartitionKey, + }, + StreamName: testStreamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest+1, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: createPutRecordsRequestEntries( + metricsData[0:maxRecordsPerRequest], + ), + }, + { + StreamName: aws.String(testStreamName), + Records: createPutRecordsRequestEntries( + metricsData[maxRecordsPerRequest:], + ), + }, + }) +} + +func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: testPartitionKey, + }, + StreamName: testStreamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest*2, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: createPutRecordsRequestEntries( + metricsData[0:maxRecordsPerRequest], + ), + }, + { + StreamName: aws.String(testStreamName), + Records: createPutRecordsRequestEntries( + metricsData[maxRecordsPerRequest:], + ), + }, + }) +} + +func TestWrite_SerializerError(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(2, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: testPartitionKey, + }, + StreamName: testStreamName, + serializer: serializer, + svc: svc, + } + + metric1, metric1Data := createTestMetric(t, "metric1", serializer) + metric2, metric2Data 
:= createTestMetric(t, "metric2", serializer) + + // metric is invalid because of empty name + invalidMetric := testutil.TestMetric(3, "") + + err := k.Write([]telegraf.Metric{ + metric1, + invalidMetric, + metric2, + }) + assert.Nil(err, "Should not return error") + + // remaining valid metrics should still get written + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ + { + StreamName: aws.String(testStreamName), + Records: []types.PutRecordsRequestEntry{ + { + PartitionKey: aws.String(testPartitionKey), + Data: metric1Data, + }, + { + PartitionKey: aws.String(testPartitionKey), + Data: metric2Data, + }, + }, + }, + }) +} + +type mockKinesisPutRecordsResponse struct { + Output *kinesis.PutRecordsOutput + Err error +} + +type mockKinesisPutRecords struct { + requests []*kinesis.PutRecordsInput + responses []*mockKinesisPutRecordsResponse +} + +func (m *mockKinesisPutRecords) SetupResponse( + failedRecordCount int32, + records []types.PutRecordsResultEntry, +) { + m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ + Err: nil, + Output: &kinesis.PutRecordsOutput{ + FailedRecordCount: aws.Int32(failedRecordCount), + Records: records, + }, + }) +} + +func (m *mockKinesisPutRecords) SetupGenericResponse( + successfulRecordCount uint32, + failedRecordCount int32, +) { + records := []types.PutRecordsResultEntry{} + + for i := uint32(0); i < successfulRecordCount; i++ { + records = append(records, types.PutRecordsResultEntry{ + SequenceNumber: aws.String(testSequenceNumber), + ShardId: aws.String(testShardID), + }) + } + + for i := int32(0); i < failedRecordCount; i++ { + records = append(records, types.PutRecordsResultEntry{ + ErrorCode: aws.String("InternalFailure"), + ErrorMessage: aws.String("Internal Service Failure"), + }) + } + + m.SetupResponse(failedRecordCount, records) +} + +func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { + m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ + Err: err, + Output: nil, + }) +} + +func (m *mockKinesisPutRecords) PutRecords(_ context.Context, input *kinesis.PutRecordsInput, _ ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) { + reqNum := len(m.requests) + if reqNum > len(m.responses) { + return nil, fmt.Errorf("Response for request %+v not setup", reqNum) + } + + m.requests = append(m.requests, input) + + resp := m.responses[reqNum] + return resp.Output, resp.Err +} + +func (m *mockKinesisPutRecords) AssertRequests( + t *testing.T, + expected []*kinesis.PutRecordsInput, +) { + require.Equalf(t, + len(expected), + len(m.requests), + "Expected %v requests", len(expected), + ) + + for i, expectedInput := range expected { + actualInput := m.requests[i] + + require.Equalf(t, + expectedInput.StreamName, + actualInput.StreamName, + "Expected request %v to have correct StreamName", i, + ) + + require.Equalf(t, + len(expectedInput.Records), + len(actualInput.Records), + "Expected request %v to have %v Records", i, len(expectedInput.Records), + ) + + for r, expectedRecord := range expectedInput.Records { + actualRecord := actualInput.Records[r] + + require.Equalf(t, + expectedRecord.PartitionKey, + actualRecord.PartitionKey, + "Expected (request %v, record %v) to have correct PartitionKey", i, r, + ) + + require.Equalf(t, + expectedRecord.ExplicitHashKey, + actualRecord.ExplicitHashKey, + "Expected (request %v, record %v) to have correct ExplicitHashKey", i, r, + ) + + require.Equalf(t, + expectedRecord.Data, + actualRecord.Data, + "Expected (request %v, record %v) to have correct Data", i, r, + ) + } + } 
+} + +func createTestMetric( + t *testing.T, + name string, + serializer serializers.Serializer, +) (telegraf.Metric, []byte) { + metric := testutil.TestMetric(1, name) + + data, err := serializer.Serialize(metric) + require.NoError(t, err) + + return metric, data +} + +func createTestMetrics( + t *testing.T, + count uint32, + serializer serializers.Serializer, +) ([]telegraf.Metric, [][]byte) { + metrics := make([]telegraf.Metric, count) + metricsData := make([][]byte, count) + + for i := uint32(0); i < count; i++ { + name := fmt.Sprintf("metric%d", i) + metric, data := createTestMetric(t, name, serializer) + metrics[i] = metric + metricsData[i] = data + } + + return metrics, metricsData +} + +func createPutRecordsRequestEntries( + metricsData [][]byte, +) []types.PutRecordsRequestEntry { + count := len(metricsData) + records := make([]types.PutRecordsRequestEntry, count) + + for i := 0; i < count; i++ { + records[i] = types.PutRecordsRequestEntry{ + PartitionKey: aws.String(testPartitionKey), + Data: metricsData[i], + } + } + + return records +} diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 53bb8c1249188..dc1e9b6fa7856 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -4,25 +4,26 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" - "log" + "io" "net/http" "regexp" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/graphite" ) // Librato structure for configuration and client type Librato struct { - APIUser string `toml:"api_user"` - APIToken string `toml:"api_token"` - Debug bool - SourceTag string // Deprecated, keeping for backward-compatibility - Timeout internal.Duration - Template string + APIUser string `toml:"api_user"` + APIToken string `toml:"api_token"` + Debug bool `toml:"debug"` + SourceTag string `toml:"source_tag"` // Deprecated, keeping for backward-compatibility + Timeout config.Duration `toml:"timeout"` + Template string `toml:"template"` + Log telegraf.Logger `toml:"-"` APIUrl string client *http.Client @@ -83,13 +84,12 @@ func (l *Librato) Connect() error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: l.Timeout.Duration, + Timeout: time.Duration(l.Timeout), } return nil } func (l *Librato) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { return nil } @@ -106,12 +106,11 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { if gauges, err := l.buildGauges(m); err == nil { for _, gauge := range gauges { tempGauges = append(tempGauges, gauge) - log.Printf("D! Got a gauge: %v\n", gauge) + l.Log.Debugf("Got a gauge: %v", gauge) } } else { - log.Printf("I! unable to build Gauge for %s, skipping\n", m.Name()) - log.Printf("D! Couldn't build gauge: %v\n", err) - + l.Log.Infof("Unable to build Gauge for %s, skipping", m.Name()) + l.Log.Debugf("Couldn't build gauge: %v", err) } } @@ -129,34 +128,32 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { copy(lmetrics.Gauges, tempGauges[start:end]) metricsBytes, err := json.Marshal(lmetrics) if err != nil { - return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) + return fmt.Errorf("unable to marshal Metrics, %s", err.Error()) } - log.Printf("D! 
Librato request: %v\n", string(metricsBytes)) + l.Log.Debugf("Librato request: %v", string(metricsBytes)) req, err := http.NewRequest( "POST", l.APIUrl, bytes.NewBuffer(metricsBytes)) if err != nil { - return fmt.Errorf( - "unable to create http.Request, %s\n", - err.Error()) + return fmt.Errorf("unable to create http.Request, %s", err.Error()) } req.Header.Add("Content-Type", "application/json") req.SetBasicAuth(l.APIUser, l.APIToken) resp, err := l.client.Do(req) if err != nil { - log.Printf("D! Error POSTing metrics: %v\n", err.Error()) - return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + l.Log.Debugf("Error POSTing metrics: %v", err.Error()) + return fmt.Errorf("error POSTing metrics, %s", err.Error()) } defer resp.Body.Close() if resp.StatusCode != 200 || l.Debug { - htmlData, err := ioutil.ReadAll(resp.Body) + htmlData, err := io.ReadAll(resp.Body) if err != nil { - log.Printf("D! Couldn't get response! (%v)\n", err) + l.Log.Debugf("Couldn't get response! (%v)", err) } if resp.StatusCode != 200 { return fmt.Errorf( @@ -164,7 +161,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { resp.StatusCode, string(htmlData)) } - log.Printf("D! Librato response: %v\n", string(htmlData)) + l.Log.Debugf("Librato response: %v", string(htmlData)) } } @@ -183,7 +180,6 @@ func (l *Librato) Description() string { } func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { - gauges := []*Gauge{} if m.Time().Unix() == 0 { return gauges, fmt.Errorf("time was zero %s", m.Name()) @@ -193,11 +189,9 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { "value") if metricSource == "" { return gauges, - fmt.Errorf("undeterminable Source type from Field, %s\n", - l.Template) + fmt.Errorf("undeterminable Source type from Field, %s", l.Template) } for fieldName, value := range m.Fields() { - metricName := m.Name() if fieldName != "value" { metricName = fmt.Sprintf("%s.%s", m.Name(), fieldName) @@ -212,14 +206,12 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { continue } if err := gauge.setValue(value); err != nil { - return gauges, fmt.Errorf( - "unable to extract value from Fields, %s\n", - err.Error()) + return gauges, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) } gauges = append(gauges, gauge) } - log.Printf("D! 
Built gauges: %v\n", gauges) + l.Log.Debugf("Built gauges: %v", gauges) return gauges, nil } @@ -234,7 +226,7 @@ func verifyValue(v interface{}) bool { func (g *Gauge) setValue(v interface{}) error { switch d := v.(type) { case int64: - g.Value = float64(int64(d)) + g.Value = float64(d) case uint64: g.Value = float64(d) case float64: diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index fe39313742751..f88ced5b67f33 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -10,19 +10,17 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) var ( - fakeURL = "http://test.librato.com" - fakeUser = "telegraf@influxdb.com" - fakeToken = "123456" + fakeURL = "http://test.librato.com" ) -func fakeLibrato() *Librato { - l := NewLibrato(fakeURL) - l.APIUser = fakeUser - l.APIToken = fakeToken +func newTestLibrato(testURL string) *Librato { + l := NewLibrato(testURL) + l.Log = testutil.Logger{} return l } @@ -34,7 +32,7 @@ func TestUriOverride(t *testing.T) { })) defer ts.Close() - l := NewLibrato(ts.URL) + l := newTestLibrato(ts.URL) l.APIUser = "telegraf@influxdb.com" l.APIToken = "123456" err := l.Connect() @@ -50,7 +48,7 @@ func TestBadStatusCode(t *testing.T) { })) defer ts.Close() - l := NewLibrato(ts.URL) + l := newTestLibrato(ts.URL) l.APIUser = "telegraf@influxdb.com" l.APIToken = "123456" err := l.Connect() @@ -66,7 +64,6 @@ func TestBadStatusCode(t *testing.T) { } func TestBuildGauge(t *testing.T) { - mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix() var gaugeTests = []struct { ptIn telegraf.Metric @@ -140,7 +137,7 @@ func TestBuildGauge(t *testing.T) { }, } - l := NewLibrato(fakeURL) + l := newTestLibrato(fakeURL) for _, gt := range gaugeTests { gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { @@ -164,7 +161,7 @@ func TestBuildGauge(t *testing.T) { } func newHostMetric(value interface{}, name, host string) telegraf.Metric { - m, _ := metric.New( + m := metric.New( name, map[string]string{"host": host}, map[string]interface{}{"value": value}, @@ -175,19 +172,19 @@ func newHostMetric(value interface{}, name, host string) telegraf.Metric { func TestBuildGaugeWithSource(t *testing.T) { mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - pt1, _ := metric.New( + pt1 := metric.New( "test1", map[string]string{"hostname": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 0.0}, mtime, ) - pt2, _ := metric.New( + pt2 := metric.New( "test2", map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 1.0}, mtime, ) - pt3, _ := metric.New( + pt3 := metric.New( "test3", map[string]string{ "hostname": "192.168.0.1", @@ -196,7 +193,7 @@ func TestBuildGaugeWithSource(t *testing.T) { map[string]interface{}{"value": 1.0}, mtime, ) - pt4, _ := metric.New( + pt4 := metric.New( "test4", map[string]string{ "hostname": "192.168.0.1", @@ -257,7 +254,7 @@ func TestBuildGaugeWithSource(t *testing.T) { }, } - l := NewLibrato(fakeURL) + l := newTestLibrato(fakeURL) for _, gt := range gaugeTests { l.Template = gt.template gauges, err := l.buildGauges(gt.ptIn) diff --git a/plugins/outputs/logzio/README.md b/plugins/outputs/logzio/README.md new file mode 100644 index 0000000000000..5cf61233e3274 --- /dev/null +++ b/plugins/outputs/logzio/README.md @@ -0,0 +1,43 @@ +# Logz.io Output Plugin + +This plugin 
sends metrics to Logz.io over HTTPS. + +### Configuration: + +```toml +# A plugin that can send metrics over HTTPS to Logz.io +[[outputs.logzio]] + ## Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. + # check_disk_space = true + + ## The percent of used file system space at which the sender will stop queueing. + ## When that percentage is reached, the file system in which the queue is stored will drop + ## all new logs until the percentage of used space drops below that threshold. + # disk_threshold = 98 + + ## How often Logz.io sender should drain the queue. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + # drain_duration = "3s" + + ## Where Logz.io sender should store the queue + ## queue_dir = Sprintf("%s%s%s%s%d", os.TempDir(), string(os.PathSeparator), + ## "logzio-buffer", string(os.PathSeparator), time.Now().UnixNano()) + + ## Logz.io account token + token = "your Logz.io token" # required + + ## Use your listener URL for your Logz.io account region. + # url = "https://listener.logz.io:8071" +``` + +### Required parameters: + +* `token`: Your Logz.io token, which can be found under "settings" in your account. + +### Optional parameters: + +* `check_disk_space`: Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. +* `disk_threshold`: If the queue_dir space crosses this threshold (in % of disk usage), the plugin will start dropping logs. +* `drain_duration`: Time to sleep between sending attempts. +* `queue_dir`: Metrics disk path. All the unsent metrics are saved to the disk in this location. +* `url`: Logz.io listener URL. \ No newline at end of file diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go new file mode 100644 index 0000000000000..caec293b1c46f --- /dev/null +++ b/plugins/outputs/logzio/logzio.go @@ -0,0 +1,175 @@ +package logzio + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultLogzioURL = "https://listener.logz.io:8071" + + logzioDescription = "Send aggregate metrics to Logz.io" + logzioType = "telegraf" +) + +var sampleConfig = ` + ## Connection timeout, defaults to "5s" if not set. + timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Logz.io account token + token = "your logz.io token" # required + + ## Use your listener URL for your Logz.io account region. 
+ # url = "https://listener.logz.io:8071" +` + +type Logzio struct { + Log telegraf.Logger `toml:"-"` + Timeout config.Duration `toml:"timeout"` + Token string `toml:"token"` + URL string `toml:"url"` + + tls.ClientConfig + client *http.Client +} + +type TimeSeries struct { + Series []*Metric +} + +type Metric struct { + Metric map[string]interface{} `json:"metrics"` + Dimensions map[string]string `json:"dimensions"` + Time time.Time `json:"@timestamp"` + Type string `json:"type"` +} + +// Connect to the Output +func (l *Logzio) Connect() error { + l.Log.Debug("Connecting to logz.io output...") + + if l.Token == "" || l.Token == "your logz.io token" { + return fmt.Errorf("token is required") + } + + tlsCfg, err := l.ClientConfig.TLSConfig() + if err != nil { + return err + } + + l.client = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsCfg, + }, + Timeout: time.Duration(l.Timeout), + } + + return nil +} + +// Close any connections to the Output +func (l *Logzio) Close() error { + l.Log.Debug("Closing logz.io output") + return nil +} + +// Description returns a one-sentence description on the Output +func (l *Logzio) Description() string { + return logzioDescription +} + +// SampleConfig returns the default configuration of the Output +func (l *Logzio) SampleConfig() string { + return sampleConfig +} + +// Write takes in group of points to be written to the Output +func (l *Logzio) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } + + var buff bytes.Buffer + gz := gzip.NewWriter(&buff) + for _, metric := range metrics { + m := l.parseMetric(metric) + + serialized, err := json.Marshal(m) + if err != nil { + return fmt.Errorf("unable to marshal metric, %s", err.Error()) + } + + _, err = gz.Write(append(serialized, '\n')) + if err != nil { + return fmt.Errorf("unable to write gzip meric, %s", err.Error()) + } + } + + err := gz.Close() + if err != nil { + return fmt.Errorf("unable to close gzip, %s", err.Error()) + } + + return l.send(buff.Bytes()) +} + +func (l *Logzio) send(metrics []byte) error { + req, err := http.NewRequest("POST", l.authURL(), bytes.NewBuffer(metrics)) + if err != nil { + return fmt.Errorf("unable to create http.Request, %s", err.Error()) + } + req.Header.Add("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") + + resp, err := l.client.Do(req) + if err != nil { + return fmt.Errorf("error POSTing metrics, %s", err.Error()) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode > 209 { + return fmt.Errorf("received bad status code, %d", resp.StatusCode) + } + + return nil +} + +func (l *Logzio) authURL() string { + return fmt.Sprintf("%s/?token=%s", l.URL, l.Token) +} + +func (l *Logzio) parseMetric(metric telegraf.Metric) *Metric { + return &Metric{ + Metric: map[string]interface{}{ + metric.Name(): metric.Fields(), + }, + Dimensions: metric.Tags(), + Time: metric.Time(), + Type: logzioType, + } +} + +func init() { + outputs.Add("logzio", func() telegraf.Output { + return &Logzio{ + URL: defaultLogzioURL, + Timeout: config.Duration(time.Second * 5), + } + }) +} diff --git a/plugins/outputs/logzio/logzio_test.go b/plugins/outputs/logzio/logzio_test.go new file mode 100644 index 0000000000000..074192e06f0e2 --- /dev/null +++ b/plugins/outputs/logzio/logzio_test.go @@ -0,0 +1,94 @@ +package logzio + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + 
"github.com/stretchr/testify/require" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +const ( + testToken = "123456789" + testURL = "https://logzio.com" +) + +func TestConnetWithoutToken(t *testing.T) { + l := &Logzio{ + URL: testURL, + Log: testutil.Logger{}, + } + err := l.Connect() + require.Error(t, err) +} + +func TestParseMetric(t *testing.T) { + l := &Logzio{} + for _, tm := range testutil.MockMetrics() { + lm := l.parseMetric(tm) + require.Equal(t, tm.Fields(), lm.Metric[tm.Name()]) + require.Equal(t, logzioType, lm.Type) + require.Equal(t, tm.Tags(), lm.Dimensions) + require.Equal(t, tm.Time(), lm.Time) + } +} + +func TestBadStatusCode(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + + l := &Logzio{ + Token: testToken, + URL: ts.URL, + Log: testutil.Logger{}, + } + + err := l.Connect() + require.NoError(t, err) + + err = l.Write(testutil.MockMetrics()) + require.Error(t, err) +} + +func TestWrite(t *testing.T) { + tm := testutil.TestMetric(float64(3.14), "test1") + var body bytes.Buffer + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gz, err := gzip.NewReader(r.Body) + require.NoError(t, err) + + _, err = io.Copy(&body, gz) + require.NoError(t, err) + + var lm Metric + err = json.Unmarshal(body.Bytes(), &lm) + require.NoError(t, err) + + require.Equal(t, tm.Fields(), lm.Metric[tm.Name()]) + require.Equal(t, logzioType, lm.Type) + require.Equal(t, tm.Tags(), lm.Dimensions) + require.Equal(t, tm.Time(), lm.Time) + + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + l := &Logzio{ + Token: testToken, + URL: ts.URL, + Log: testutil.Logger{}, + } + + err := l.Connect() + require.NoError(t, err) + + err = l.Write([]telegraf.Metric{tm}) + require.NoError(t, err) +} diff --git a/plugins/outputs/loki/README.md b/plugins/outputs/loki/README.md new file mode 100644 index 0000000000000..6c7eb91c8916a --- /dev/null +++ b/plugins/outputs/loki/README.md @@ -0,0 +1,36 @@ +# Loki Output Plugin + +This plugin sends logs to Loki, using metric name and tags as labels, +log line will content all fields in `key="value"` format which is easily parsable with `logfmt` parser in Loki. + +Logs within each stream are sorted by timestamp before being sent to Loki. + +### Configuration: + +```toml +# A plugin that can transmit logs to Loki +[[outputs.loki]] + ## The domain of Loki + domain = "https://loki.domain.tld" + + ## Endpoint to write api + # endpoint = "/loki/api/v1/push" + + ## Connection timeout, defaults to "5s" if not set. 
+ # timeout = "5s" + + ## Basic auth credential + # username = "loki" + # password = "pass" + + ## Additional HTTP headers + # http_headers = {"X-Scope-OrgID" = "1"} + + ## If the request must be gzip encoded + # gzip_request = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +``` diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go new file mode 100644 index 0000000000000..fcf96e55f6429 --- /dev/null +++ b/plugins/outputs/loki/loki.go @@ -0,0 +1,217 @@ +package loki + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "sort" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +const ( + defaultEndpoint = "/loki/api/v1/push" + defaultClientTimeout = 5 * time.Second +) + +var sampleConfig = ` + ## The domain of Loki + domain = "https://loki.domain.tld" + + ## Endpoint to write api + # endpoint = "/loki/api/v1/push" + + ## Connection timeout, defaults to "5s" if not set. + # timeout = "5s" + + ## Basic auth credential + # username = "loki" + # password = "pass" + + ## Additional HTTP headers + # http_headers = {"X-Scope-OrgID" = "1"} + + ## If the request must be gzip encoded + # gzip_request = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +` + +type Loki struct { + Domain string `toml:"domain"` + Endpoint string `toml:"endpoint"` + Timeout config.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"http_headers"` + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` + GZipRequest bool `toml:"gzip_request"` + + url string + client *http.Client + tls.ClientConfig +} + +func (l *Loki) SampleConfig() string { + return sampleConfig +} + +func (l *Loki) Description() string { + return "Send logs to Loki" +} + +func (l *Loki) createClient(ctx context.Context) (*http.Client, error) { + tlsCfg, err := l.ClientConfig.TLSConfig() + if err != nil { + return nil, fmt.Errorf("tls config fail: %w", err) + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: time.Duration(l.Timeout), + } + + if l.ClientID != "" && l.ClientSecret != "" && l.TokenURL != "" { + oauthConfig := clientcredentials.Config{ + ClientID: l.ClientID, + ClientSecret: l.ClientSecret, + TokenURL: l.TokenURL, + Scopes: l.Scopes, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, client) + client = oauthConfig.Client(ctx) + } + + return client, nil +} + +func (l *Loki) Connect() (err error) { + if l.Domain == "" { + return fmt.Errorf("domain is required") + } + + if l.Endpoint == "" { + l.Endpoint = defaultEndpoint + } + + l.url = fmt.Sprintf("%s%s", l.Domain, l.Endpoint) + + if l.Timeout == 0 { + l.Timeout = config.Duration(defaultClientTimeout) + } + + ctx := context.Background() + l.client, err = l.createClient(ctx) + if err != nil { + return fmt.Errorf("http client fail: %w", err) + } + + return +} + +func (l *Loki) Close() error { + l.client.CloseIdleConnections() + 
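+ // CloseIdleConnections only tears down keep-alive connections idling in the
+ // transport's pool; any in-flight request is left to finish.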
+ return nil +} + +func (l *Loki) Write(metrics []telegraf.Metric) error { + s := Streams{} + + sort.SliceStable(metrics, func(i, j int) bool { + return metrics[i].Time().Before(metrics[j].Time()) + }) + + for _, m := range metrics { + m.AddTag("__name", m.Name()) + + tags := m.TagList() + var line string + + for _, f := range m.FieldList() { + line += fmt.Sprintf("%s=\"%v\" ", f.Key, f.Value) + } + + s.insertLog(tags, Log{fmt.Sprintf("%d", m.Time().UnixNano()), line}) + } + + return l.write(s) +} + +func (l *Loki) write(s Streams) error { + bs, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("json.Marshal: %w", err) + } + + var reqBodyBuffer io.Reader = bytes.NewBuffer(bs) + + if l.GZipRequest { + rc, err := internal.CompressWithGzip(reqBodyBuffer) + if err != nil { + return err + } + defer rc.Close() + reqBodyBuffer = rc + } + + req, err := http.NewRequest(http.MethodPost, l.url, reqBodyBuffer) + if err != nil { + return err + } + + if l.Username != "" { + req.SetBasicAuth(l.Username, l.Password) + } + + for k, v := range l.Headers { + if strings.ToLower(k) == "host" { + req.Host = v + } + req.Header.Set(k, v) + } + + req.Header.Set("User-Agent", internal.ProductToken()) + req.Header.Set("Content-Type", "application/json") + if l.GZipRequest { + req.Header.Set("Content-Encoding", "gzip") + } + + resp, err := l.client.Do(req) + if err != nil { + return err + } + _ = resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("when writing to [%s] received status code: %d", l.url, resp.StatusCode) + } + + return nil +} + +func init() { + outputs.Add("loki", func() telegraf.Output { + return &Loki{} + }) +} diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go new file mode 100644 index 0000000000000..6f0678e8dd4b5 --- /dev/null +++ b/plugins/outputs/loki/loki_test.go @@ -0,0 +1,428 @@ +package loki + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" +) + +func getMetric() telegraf.Metric { + return testutil.MustMetric( + "log", + map[string]string{ + "key1": "value1", + }, + map[string]interface{}{ + "line": "my log", + "field": 3.14, + }, + time.Unix(123, 0), + ) +} + +func getOutOfOrderMetrics() []telegraf.Metric { + return []telegraf.Metric{ + testutil.MustMetric( + "log", + map[string]string{ + "key1": "value1", + }, + map[string]interface{}{ + "line": "newer log", + "field": 3.14, + }, + time.Unix(1230, 0), + ), + testutil.MustMetric( + "log", + map[string]string{ + "key1": "value1", + }, + map[string]interface{}{ + "line": "older log", + "field": 3.14, + }, + time.Unix(456, 0), + ), + } +} + +func TestStatusCode(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + statusCode int + errFunc func(t *testing.T, err error) + }{ + { + name: "success", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: http.StatusNoContent, + errFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "1xx status is an error", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: 103, + errFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + 
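+ // Loki replies 204 No Content on success; write() treats any status outside
+ // [200, 300) as an error, which the remaining cases exercise.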
{ + name: "3xx status is an error", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: http.StatusMultipleChoices, + errFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + { + name: "4xx status is an error", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: http.StatusBadRequest, + errFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.statusCode) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + tt.errFunc(t, err) + }) + } +} + +func TestContentType(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + expected string + }{ + { + name: "default is application/json", + plugin: &Loki{ + Domain: u.String(), + }, + expected: "application/json", + }, + { + name: "overwrite content_type", + plugin: &Loki{ + Domain: u.String(), + Headers: map[string]string{"Content-Type": "plain/text"}, + }, + // the plugin forces the Content-Type header + expected: "application/json", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tt.expected, r.Header.Get("Content-Type")) + w.WriteHeader(http.StatusOK) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestContentEncodingGzip(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + expected string + }{ + { + name: "default is no content encoding", + plugin: &Loki{ + Domain: u.String(), + }, + expected: "", + }, + { + name: "overwrite content_encoding", + plugin: &Loki{ + Domain: u.String(), + GZipRequest: true, + }, + expected: "gzip", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tt.expected, r.Header.Get("Content-Encoding")) + + body := r.Body + var err error + if r.Header.Get("Content-Encoding") == "gzip" { + body, err = gzip.NewReader(r.Body) + require.NoError(t, err) + } + + payload, err := io.ReadAll(body) + require.NoError(t, err) + + var s Request + err = json.Unmarshal(payload, &s) + require.NoError(t, err) + require.Len(t, s.Streams, 1) + require.Len(t, s.Streams[0].Logs, 1) + require.Len(t, s.Streams[0].Logs[0], 2) + require.Equal(t, map[string]string{"__name": "log", "key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, "123000000000", s.Streams[0].Logs[0][0]) + require.Contains(t, s.Streams[0].Logs[0][1], "line=\"my log\"") + require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") + + w.WriteHeader(http.StatusNoContent) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestBasicAuth(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := 
url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + }{ + { + name: "default", + plugin: &Loki{ + Domain: u.String(), + }, + }, + { + name: "username and password", + plugin: &Loki{ + Domain: u.String(), + Username: "username", + Password: "pa$$word", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + username, password, _ := r.BasicAuth() + require.Equal(t, tt.plugin.Username, username) + require.Equal(t, tt.plugin.Password, password) + w.WriteHeader(http.StatusOK) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &Loki{ + Domain: u.String(), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &Loki{ + Domain: u.String(), + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + w.Write([]byte(values.Encode())) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case defaultEndpoint: + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestDefaultUserAgent(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + t.Run("default-user-agent", func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + w.WriteHeader(http.StatusOK) + }) + + client := &Loki{ + Domain: u.String(), + } + + err = client.Connect() + require.NoError(t, err) + + err = client.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) +} + +func TestMetricSorting(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + t.Run("out of order metrics", 
func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body := r.Body + var err error + + payload, err := io.ReadAll(body) + require.NoError(t, err) + + var s Request + err = json.Unmarshal(payload, &s) + require.NoError(t, err) + require.Len(t, s.Streams, 1) + require.Len(t, s.Streams[0].Logs, 2) + require.Len(t, s.Streams[0].Logs[0], 2) + require.Equal(t, map[string]string{"__name": "log", "key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, "456000000000", s.Streams[0].Logs[0][0]) + require.Contains(t, s.Streams[0].Logs[0][1], "line=\"older log\"") + require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") + require.Equal(t, "1230000000000", s.Streams[0].Logs[1][0]) + require.Contains(t, s.Streams[0].Logs[1][1], "line=\"newer log\"") + require.Contains(t, s.Streams[0].Logs[1][1], "field=\"3.14\"") + + w.WriteHeader(http.StatusNoContent) + }) + + client := &Loki{ + Domain: u.String(), + } + + err = client.Connect() + require.NoError(t, err) + + err = client.Write(getOutOfOrderMetrics()) + require.NoError(t, err) + }) +} diff --git a/plugins/outputs/loki/stream.go b/plugins/outputs/loki/stream.go new file mode 100644 index 0000000000000..4f9f9c07269c6 --- /dev/null +++ b/plugins/outputs/loki/stream.go @@ -0,0 +1,70 @@ +package loki + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/influxdata/telegraf" +) + +type ( + Log []string + + Streams map[string]*Stream + + Stream struct { + Labels map[string]string `json:"stream"` + Logs []Log `json:"values"` + } + + Request struct { + Streams []Stream `json:"streams"` + } +) + +func (s Streams) insertLog(ts []*telegraf.Tag, l Log) { + key := uniqKeyFromTagList(ts) + + if _, ok := s[key]; !ok { + s[key] = newStream(ts) + } + + s[key].Logs = append(s[key].Logs, l) +} + +func (s Streams) MarshalJSON() ([]byte, error) { + r := Request{ + Streams: make([]Stream, 0, len(s)), + } + + for _, stream := range s { + r.Streams = append(r.Streams, *stream) + } + + return json.Marshal(r) +} + +func uniqKeyFromTagList(ts []*telegraf.Tag) (k string) { + for _, t := range ts { + k += fmt.Sprintf("%s-%s-", + strings.ReplaceAll(t.Key, "-", "--"), + strings.ReplaceAll(t.Value, "-", "--"), + ) + } + + return k +} + +func newStream(ts []*telegraf.Tag) *Stream { + s := &Stream{ + Logs: make([]Log, 0), + Labels: map[string]string{}, + } + + for _, t := range ts { + s.Labels[t.Key] = t.Value + } + + return s +} diff --git a/plugins/outputs/loki/stream_test.go b/plugins/outputs/loki/stream_test.go new file mode 100644 index 0000000000000..7a47de5ccd746 --- /dev/null +++ b/plugins/outputs/loki/stream_test.go @@ -0,0 +1,157 @@ +package loki + +import ( + "testing" + + "github.com/influxdata/telegraf" + "github.com/stretchr/testify/require" +) + +type tuple struct { + key, value string +} + +func generateLabelsAndTag(tt ...tuple) (map[string]string, []*telegraf.Tag) { + labels := map[string]string{} + var tags []*telegraf.Tag + + for _, t := range tt { + labels[t.key] = t.value + tags = append(tags, &telegraf.Tag{Key: t.key, Value: t.value}) + } + + return labels, tags +} + +func TestGenerateLabelsAndTag(t *testing.T) { + labels, tags := generateLabelsAndTag( + tuple{key: "key1", value: "value1"}, + tuple{key: "key2", value: "value2"}, + tuple{key: "key3", value: "value3"}, + ) + + expectedTags := []*telegraf.Tag{ + {Key: "key1", Value: "value1"}, + {Key: "key2", Value: "value2"}, + {Key: "key3", Value: "value3"}, + } + + require.Len(t, labels, 3) + require.Len(t, tags, 3) + require.Equal(t, 
map[string]string{"key1": "value1", "key2": "value2", "key3": "value3"}, labels) + require.Equal(t, map[string]string{"key1": "value1", "key2": "value2", "key3": "value3"}, labels) + require.Equal(t, expectedTags, tags) +} + +func TestStream_insertLog(t *testing.T) { + s := Streams{} + log1 := Log{"123", "this log isn't useful"} + log2 := Log{"124", "this log isn't useful neither"} + log3 := Log{"122", "again"} + + key1 := "key1-value1-key2-value2-key3-value3-" + labels1, tags1 := generateLabelsAndTag( + tuple{key: "key1", value: "value1"}, + tuple{key: "key2", value: "value2"}, + tuple{key: "key3", value: "value3"}, + ) + + key2 := "key2-value2-" + labels2, tags2 := generateLabelsAndTag( + tuple{key: "key2", value: "value2"}, + ) + + s.insertLog(tags1, log1) + + require.Len(t, s, 1) + require.Contains(t, s, key1) + require.Len(t, s[key1].Logs, 1) + require.Equal(t, labels1, s[key1].Labels) + require.Equal(t, "123", s[key1].Logs[0][0]) + require.Equal(t, "this log isn't useful", s[key1].Logs[0][1]) + + s.insertLog(tags1, log2) + + require.Len(t, s, 1) + require.Len(t, s[key1].Logs, 2) + require.Equal(t, "124", s[key1].Logs[1][0]) + require.Equal(t, "this log isn't useful neither", s[key1].Logs[1][1]) + + s.insertLog(tags2, log3) + + require.Len(t, s, 2) + require.Contains(t, s, key2) + require.Len(t, s[key2].Logs, 1) + require.Equal(t, labels2, s[key2].Labels) + require.Equal(t, "122", s[key2].Logs[0][0]) + require.Equal(t, "again", s[key2].Logs[0][1]) +} + +func TestUniqKeyFromTagList(t *testing.T) { + tests := []struct { + in []*telegraf.Tag + out string + }{ + { + in: []*telegraf.Tag{ + {Key: "key1", Value: "value1"}, + {Key: "key2", Value: "value2"}, + {Key: "key3", Value: "value3"}, + }, + out: "key1-value1-key2-value2-key3-value3-", + }, + { + in: []*telegraf.Tag{ + {Key: "key1", Value: "value1"}, + {Key: "key3", Value: "value3"}, + {Key: "key4", Value: "value4"}, + }, + out: "key1-value1-key3-value3-key4-value4-", + }, + { + in: []*telegraf.Tag{ + {Key: "target", Value: "local"}, + {Key: "host", Value: "host"}, + {Key: "service", Value: "dns"}, + }, + out: "target-local-host-host-service-dns-", + }, + { + in: []*telegraf.Tag{ + {Key: "target", Value: "localhost"}, + {Key: "hostservice", Value: "dns"}, + }, + out: "target-localhost-hostservice-dns-", + }, + { + in: []*telegraf.Tag{ + {Key: "target-local", Value: "host-"}, + }, + out: "target--local-host---", + }, + } + + for _, test := range tests { + require.Equal(t, test.out, uniqKeyFromTagList(test.in)) + } +} + +func Test_newStream(t *testing.T) { + labels, tags := generateLabelsAndTag( + tuple{key: "key1", value: "value1"}, + tuple{key: "key2", value: "value2"}, + tuple{key: "key3", value: "value3"}, + ) + + s := newStream(tags) + + require.Empty(t, s.Logs) + require.Equal(t, s.Labels, labels) +} + +func Test_newStream_noTag(t *testing.T) { + s := newStream(nil) + + require.Empty(t, s.Logs) + require.Empty(t, s.Labels) +} diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index abb770f068d4f..f82d7597c5bea 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -40,6 +40,12 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ## When true, messages will have RETAIN flag set. # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. 
For version v2.0.12 mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. # data_format = "influx" ``` @@ -62,3 +68,4 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. * `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) +* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 0e07b1bca8ab7..54203ee0dba66 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -9,12 +9,17 @@ import ( paho "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) +const ( + defaultKeepAlive = 0 +) + var sampleConfig = ` servers = ["localhost:1883"] # required. @@ -54,6 +59,12 @@ var sampleConfig = ` ## actually reads it # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -66,13 +77,14 @@ type MQTT struct { Username string Password string Database string - Timeout internal.Duration + Timeout config.Duration TopicPrefix string QoS int `toml:"qos"` ClientID string `toml:"client_id"` tls.ClientConfig - BatchMessage bool `toml:"batch"` - Retain bool `toml:"retain"` + BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` + KeepAlive int64 `toml:"keep_alive"` client paho.Client opts *paho.ClientOptions @@ -180,7 +192,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { func (m *MQTT) publish(topic string, body []byte) error { token := m.client.Publish(topic, byte(m.QoS), m.Retain, body) - token.WaitTimeout(m.Timeout.Duration) + token.WaitTimeout(time.Duration(m.Timeout)) if token.Error() != nil { return token.Error() } @@ -189,12 +201,12 @@ func (m *MQTT) publish(topic string, body []byte) error { func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() - opts.KeepAlive = 0 + opts.KeepAlive = m.KeepAlive - if m.Timeout.Duration < time.Second { - m.Timeout.Duration = 5 * time.Second + if m.Timeout < config.Duration(time.Second) { + m.Timeout = config.Duration(5 * time.Second) } - opts.WriteTimeout = m.Timeout.Duration + opts.WriteTimeout = time.Duration(m.Timeout) if m.ClientID != "" { opts.SetClientID(m.ClientID) @@ -236,6 +248,8 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { func init() { outputs.Add("mqtt", func() telegraf.Output { - return &MQTT{} + return &MQTT{ + KeepAlive: defaultKeepAlive, + } }) } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 260eb0c640c54..fd36d6d0577ac 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -19,6 +19,7 @@ func TestConnectAndWrite(t *testing.T) { m := &MQTT{ Servers: []string{url}, serializer: s, + KeepAlive: 30, } // Verify that we can connect to the MQTT broker diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md index c5539900b02e0..1fb1a2b4b96ae 100644 --- a/plugins/outputs/nats/README.md +++ b/plugins/outputs/nats/README.md @@ -6,6 +6,10 @@ This plugin writes to a (list of) specified NATS instance(s). 
[[outputs.nats]] ## URLs of NATS servers servers = ["nats://localhost:4222"] + + ## Optional client name + # name = "" + ## Optional credentials # username = "" # password = "" diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index bf1baae339876..f4cf35b16e4f7 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -15,6 +15,7 @@ import ( type NATS struct { Servers []string `toml:"servers"` Secure bool `toml:"secure"` + Name string `toml:"name"` Username string `toml:"username"` Password string `toml:"password"` Credentials string `toml:"credentials"` @@ -30,6 +31,9 @@ var sampleConfig = ` ## URLs of NATS servers servers = ["nats://localhost:4222"] + ## Optional client name + # name = "" + ## Optional credentials # username = "" # password = "" @@ -69,10 +73,18 @@ func (n *NATS) Connect() error { } // override authentication, if any was specified - if n.Username != "" { + if n.Username != "" && n.Password != "" { opts = append(opts, nats.UserInfo(n.Username, n.Password)) } + if n.Credentials != "" { + opts = append(opts, nats.UserCredentials(n.Credentials)) + } + + if n.Name != "" { + opts = append(opts, nats.Name(n.Name)) + } + if n.Secure { tlsConfig, err := n.ClientConfig.TLSConfig() if err != nil { diff --git a/plugins/outputs/nats/nats_test.go b/plugins/outputs/nats/nats_test.go index 773dbaa6efdbd..30004f6ae543d 100644 --- a/plugins/outputs/nats/nats_test.go +++ b/plugins/outputs/nats/nats_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -17,6 +17,7 @@ func TestConnectAndWrite(t *testing.T) { s, _ := serializers.NewInfluxSerializer() n := &NATS{ Servers: server, + Name: "telegraf", Subject: "telegraf", serializer: s, } diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md index fbafd06adb8d4..e15bedb4bdcb4 100644 --- a/plugins/outputs/newrelic/README.md +++ b/plugins/outputs/newrelic/README.md @@ -17,6 +17,14 @@ Telegraf minimum version: Telegraf 1.15.0 ## Timeout for writes to the New Relic API. # timeout = "15s" + + ## HTTP Proxy override. If unset use values from the standard + ## proxy environment variables to determine proxy, if any. + # http_proxy = "http://corporate.proxy:3128" + + ## Metric URL override to enable geographic location endpoints. 
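+  ## e.g. for EU-region accounts (illustrative):
+  ## metric_url = "https://metric-api.eu.newrelic.com/metric/v1"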
+ # If not set use values from the standard + # metric_url = "https://metric-api.newrelic.com/metric/v1" ``` [Metrics API]: https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index da000c222c823..02b2b9c3ff0ae 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -5,10 +5,11 @@ import ( "context" "fmt" "net/http" + "net/url" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/newrelic/newrelic-telemetry-sdk-go/cumulative" "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" @@ -16,15 +17,17 @@ import ( // NewRelic nr structure type NewRelic struct { - InsightsKey string `toml:"insights_key"` - MetricPrefix string `toml:"metric_prefix"` - Timeout internal.Duration `toml:"timeout"` + InsightsKey string `toml:"insights_key"` + MetricPrefix string `toml:"metric_prefix"` + Timeout config.Duration `toml:"timeout"` + HTTPProxy string `toml:"http_proxy"` + MetricURL string `toml:"metric_url"` harvestor *telemetry.Harvester dc *cumulative.DeltaCalculator savedErrors map[int]interface{} errorCount int - Client http.Client `toml:"-"` + client http.Client `toml:"-"` } // Description returns a one-sentence description on the Output @@ -43,6 +46,14 @@ func (nr *NewRelic) SampleConfig() string { ## Timeout for writes to the New Relic API. # timeout = "15s" + + ## HTTP Proxy override. If unset use values from the standard + ## proxy environment variables to determine proxy, if any. + # http_proxy = "http://corporate.proxy:3128" + + ## Metric URL override to enable geographic location endpoints. 
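+  ## e.g. for EU-region accounts (illustrative):
+  ## metric_url = "https://metric-api.eu.newrelic.com/metric/v1"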
+ # If not set use values from the standard + # metric_url = "https://metric-api.newrelic.com/metric/v1" ` } @@ -51,14 +62,18 @@ func (nr *NewRelic) Connect() error { if nr.InsightsKey == "" { return fmt.Errorf("InsightKey is a required for newrelic") } - var err error + err := nr.initClient() + if err != nil { + return err + } + nr.harvestor, err = telemetry.NewHarvester(telemetry.ConfigAPIKey(nr.InsightsKey), telemetry.ConfigHarvestPeriod(0), func(cfg *telemetry.Config) { cfg.Product = "NewRelic-Telegraf-Plugin" cfg.ProductVersion = "1.0" - cfg.HarvestTimeout = nr.Timeout.Duration - cfg.Client = &nr.Client + cfg.HarvestTimeout = time.Duration(nr.Timeout) + cfg.Client = &nr.client cfg.ErrorLogger = func(e map[string]interface{}) { var errorString string for k, v := range e { @@ -67,6 +82,9 @@ func (nr *NewRelic) Connect() error { nr.errorCount++ nr.savedErrors[nr.errorCount] = errorString } + if nr.MetricURL != "" { + cfg.MetricsURLOverride = nr.MetricURL + } }) if err != nil { return fmt.Errorf("unable to connect to newrelic %v", err) @@ -79,7 +97,7 @@ func (nr *NewRelic) Connect() error { // Close any connections to the Output func (nr *NewRelic) Close() error { nr.errorCount = 0 - nr.Client.CloseIdleConnections() + nr.client.CloseIdleConnections() return nil } @@ -108,7 +126,7 @@ func (nr *NewRelic) Write(metrics []telegraf.Metric) error { case uint64: mvalue = float64(n) case float64: - mvalue = float64(n) + mvalue = n case bool: mvalue = float64(0) if n { @@ -119,7 +137,7 @@ func (nr *NewRelic) Write(metrics []telegraf.Metric) error { // we just skip continue default: - return fmt.Errorf("Undefined field type: %T", field.Value) + return fmt.Errorf("undefined field type: %T", field.Value) } switch metric.Type() { @@ -151,8 +169,28 @@ func (nr *NewRelic) Write(metrics []telegraf.Metric) error { func init() { outputs.Add("newrelic", func() telegraf.Output { return &NewRelic{ - Timeout: internal.Duration{Duration: time.Second * 15}, - Client: http.Client{}, + Timeout: config.Duration(time.Second * 15), } }) } + +func (nr *NewRelic) initClient() error { + if nr.HTTPProxy == "" { + nr.client = http.Client{} + return nil + } + + proxyURL, err := url.Parse(nr.HTTPProxy) + if err != nil { + return err + } + + transport := &http.Transport{ + Proxy: http.ProxyURL(proxyURL), + } + + nr.client = http.Client{ + Transport: transport, + } + return nil +} diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go index aa23950c72611..e545a1ac94e03 100644 --- a/plugins/outputs/newrelic/newrelic_test.go +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" "github.com/stretchr/testify/assert" @@ -17,7 +17,7 @@ func TestBasic(t *testing.T) { nr := &NewRelic{ MetricPrefix: "Test", InsightsKey: "12345", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } if testing.Short() { t.Skip("skipping test in short mode.") @@ -31,9 +31,6 @@ func TestBasic(t *testing.T) { } func TestNewRelic_Write(t *testing.T) { - type args struct { - metrics []telegraf.Metric - } tests := []struct { name string metrics []telegraf.Metric @@ -164,7 +161,23 @@ func TestNewRelic_Connect(t *testing.T) { name: "Test: Insights key and Timeout", newrelic: &NewRelic{ InsightsKey: "12312133", - Timeout: 
internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + }, + wantErr: false, + }, + { + name: "Test: HTTP Proxy", + newrelic: &NewRelic{ + InsightsKey: "12121212", + HTTPProxy: "https://my.proxy", + }, + wantErr: false, + }, + { + name: "Test: Metric URL ", + newrelic: &NewRelic{ + InsightsKey: "12121212", + MetricURL: "https://test.nr.com", }, wantErr: false, }, diff --git a/plugins/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go index e2b0fc31d43e5..f7f55ddf34d07 100644 --- a/plugins/outputs/nsq/nsq_test.go +++ b/plugins/outputs/nsq/nsq_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/opentelemetry/README.md b/plugins/outputs/opentelemetry/README.md new file mode 100644 index 0000000000000..e6b4ebdfc6aad --- /dev/null +++ b/plugins/outputs/opentelemetry/README.md @@ -0,0 +1,59 @@ +# OpenTelemetry Output Plugin + +This plugin sends metrics to [OpenTelemetry](https://opentelemetry.io) servers and agents via gRPC. + +### Configuration + +```toml +[[outputs.opentelemetry]] + ## Override the default (localhost:4317) OpenTelemetry gRPC service + ## address:port + # service_address = "localhost:4317" + + ## Override the default (5s) request timeout + # timeout = "5s" + + ## Optional TLS Config. + ## + ## Root certificates for verifying server certificates encoded in PEM format. + # tls_ca = "/etc/telegraf/ca.pem" + ## The public and private keypairs for the client encoded in PEM format. + ## May contain intermediate certificates. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS, but skip TLS chain and host verification. + # insecure_skip_verify = false + ## Send the specified TLS server name via SNI. + # tls_server_name = "foo.example.com" + + ## Override the default (gzip) compression used to send data. + ## Supports: "gzip", "none" + # compression = "gzip" + + ## Additional OpenTelemetry resource attributes + # [outputs.opentelemetry.attributes] + # "service.name" = "demo" + + ## Additional gRPC request metadata + # [outputs.opentelemetry.headers] + # key1 = "value1" +``` + +#### Schema + +The InfluxDB->OpenTelemetry conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) +and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/influx2otel) +are hosted at https://github.com/influxdata/influxdb-observability . + +For metrics, two input schemata exist. +Line protocol with measurement name `prometheus` is assumed to have a schema +matching [Prometheus input plugin](../../inputs/prometheus/README.md) when `metric_version = 2`. +Line protocol with other measurement names is assumed to have schema +matching [Prometheus input plugin](../../inputs/prometheus/README.md) when `metric_version = 1`. +If both schema assumptions fail, then the line protocol data is interpreted as: +- Metric type = gauge (or counter, if indicated by the input plugin) +- Metric name = `[measurement]_[field key]` +- Metric value = line protocol field value, cast to float +- Metric labels = line protocol tags + +Also see the [OpenTelemetry input plugin](../../inputs/opentelemetry/README.md). 
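+
+To make the fallback rules concrete, here is an illustrative example (editor's
+sketch, names and values invented): the line protocol point
+
+```
+disk,host=web01 free=42i
+```
+
+matches neither schema assumption, so it is exported as a gauge metric named
+`disk_free` with value `42` and the single label `host="web01"`.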
diff --git a/plugins/outputs/opentelemetry/logger.go b/plugins/outputs/opentelemetry/logger.go new file mode 100644 index 0000000000000..3db3621bcc672 --- /dev/null +++ b/plugins/outputs/opentelemetry/logger.go @@ -0,0 +1,16 @@ +package opentelemetry + +import ( + "strings" + + "github.com/influxdata/telegraf" +) + +type otelLogger struct { + telegraf.Logger +} + +func (l otelLogger) Debug(msg string, kv ...interface{}) { + format := msg + strings.Repeat(" %s=%q", len(kv)/2) + l.Logger.Debugf(format, kv...) +} diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go new file mode 100644 index 0000000000000..7cfe1341b3ff4 --- /dev/null +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -0,0 +1,198 @@ +package opentelemetry + +import ( + "context" + "time" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/influxdb-observability/influx2otel" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" + "go.opentelemetry.io/collector/model/otlpgrpc" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + // This causes the gRPC library to register gzip compression. + _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" +) + +type OpenTelemetry struct { + ServiceAddress string `toml:"service_address"` + + tls.ClientConfig + Timeout config.Duration `toml:"timeout"` + Compression string `toml:"compression"` + Headers map[string]string `toml:"headers"` + Attributes map[string]string `toml:"attributes"` + + Log telegraf.Logger `toml:"-"` + + metricsConverter *influx2otel.LineProtocolToOtelMetrics + grpcClientConn *grpc.ClientConn + metricsServiceClient otlpgrpc.MetricsClient + callOptions []grpc.CallOption +} + +const sampleConfig = ` + ## Override the default (localhost:4317) OpenTelemetry gRPC service + ## address:port + # service_address = "localhost:4317" + + ## Override the default (5s) request timeout + # timeout = "5s" + + ## Optional TLS Config. + ## + ## Root certificates for verifying server certificates encoded in PEM format. + # tls_ca = "/etc/telegraf/ca.pem" + ## The public and private keypairs for the client encoded in PEM format. + ## May contain intermediate certificates. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS, but skip TLS chain and host verification. + # insecure_skip_verify = false + ## Send the specified TLS server name via SNI. + # tls_server_name = "foo.example.com" + + ## Override the default (gzip) compression used to send data. 
+ ## Supports: "gzip", "none" + # compression = "gzip" + + ## Additional OpenTelemetry resource attributes + # [outputs.opentelemetry.attributes] + # "service.name" = "demo" + + ## Additional gRPC request metadata + # [outputs.opentelemetry.headers] + # key1 = "value1" +` + +func (o *OpenTelemetry) SampleConfig() string { + return sampleConfig +} + +func (o *OpenTelemetry) Description() string { + return "Send OpenTelemetry metrics over gRPC" +} + +func (o *OpenTelemetry) Connect() error { + logger := &otelLogger{o.Log} + + if o.ServiceAddress == "" { + o.ServiceAddress = defaultServiceAddress + } + if o.Timeout <= 0 { + o.Timeout = defaultTimeout + } + if o.Compression == "" { + o.Compression = defaultCompression + } + + metricsConverter, err := influx2otel.NewLineProtocolToOtelMetrics(logger) + if err != nil { + return err + } + + var grpcTLSDialOption grpc.DialOption + if tlsConfig, err := o.ClientConfig.TLSConfig(); err != nil { + return err + } else if tlsConfig != nil { + grpcTLSDialOption = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)) + } else { + grpcTLSDialOption = grpc.WithInsecure() + } + + grpcClientConn, err := grpc.Dial(o.ServiceAddress, grpcTLSDialOption) + if err != nil { + return err + } + + metricsServiceClient := otlpgrpc.NewMetricsClient(grpcClientConn) + + o.metricsConverter = metricsConverter + o.grpcClientConn = grpcClientConn + o.metricsServiceClient = metricsServiceClient + + if o.Compression != "" && o.Compression != "none" { + o.callOptions = append(o.callOptions, grpc.UseCompressor(o.Compression)) + } + + return nil +} + +func (o *OpenTelemetry) Close() error { + if o.grpcClientConn != nil { + err := o.grpcClientConn.Close() + o.grpcClientConn = nil + return err + } + return nil +} + +func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { + batch := o.metricsConverter.NewBatch() + for _, metric := range metrics { + var vType common.InfluxMetricValueType + switch metric.Type() { + case telegraf.Gauge: + vType = common.InfluxMetricValueTypeGauge + case telegraf.Untyped: + vType = common.InfluxMetricValueTypeUntyped + case telegraf.Counter: + vType = common.InfluxMetricValueTypeSum + case telegraf.Histogram: + vType = common.InfluxMetricValueTypeHistogram + case telegraf.Summary: + vType = common.InfluxMetricValueTypeSummary + default: + o.Log.Warnf("unrecognized metric type %v", metric.Type()) + continue + } + err := batch.AddPoint(metric.Name(), metric.Tags(), metric.Fields(), metric.Time(), vType) + if err != nil { + o.Log.Warnf("failed to add point: %s", err) + continue + } + } + + md := otlpgrpc.NewMetricsRequest() + md.SetMetrics(batch.GetMetrics()) + if md.Metrics().ResourceMetrics().Len() == 0 { + return nil + } + + if len(o.Attributes) > 0 { + for i := 0; i < md.Metrics().ResourceMetrics().Len(); i++ { + for k, v := range o.Attributes { + md.Metrics().ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) + } + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.Timeout)) + + if len(o.Headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(o.Headers)) + } + defer cancel() + _, err := o.metricsServiceClient.Export(ctx, md, o.callOptions...) 
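+ // Export blocks until the collector accepts or rejects the whole batch, or
+ // until the timeout context above fires; the error is handed back to Telegraf.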
+ return err +} + +const ( + defaultServiceAddress = "localhost:4317" + defaultTimeout = config.Duration(5 * time.Second) + defaultCompression = "gzip" +) + +func init() { + outputs.Add("opentelemetry", func() telegraf.Output { + return &OpenTelemetry{ + ServiceAddress: defaultServiceAddress, + Timeout: defaultTimeout, + Compression: defaultCompression, + } + }) +} diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..c2f9f1980410d --- /dev/null +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,142 @@ +package opentelemetry + +import ( + "context" + "net" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/influxdb-observability/influx2otel" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func TestOpenTelemetry(t *testing.T) { + expect := pdata.NewMetrics() + { + rm := expect.ResourceMetrics().AppendEmpty() + rm.Resource().Attributes().InsertString("host.name", "potato") + rm.Resource().Attributes().InsertString("attr-key", "attr-val") + ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() + ilm.InstrumentationLibrary().SetName("My Library Name") + m := ilm.Metrics().AppendEmpty() + m.SetName("cpu_temp") + m.SetDataType(pdata.MetricDataTypeGauge) + dp := m.Gauge().DataPoints().AppendEmpty() + dp.Attributes().InsertString("foo", "bar") + dp.SetTimestamp(pdata.Timestamp(1622848686000000000)) + dp.SetDoubleVal(87.332) + } + m := newMockOtelService(t) + t.Cleanup(m.Cleanup) + + metricsConverter, err := influx2otel.NewLineProtocolToOtelMetrics(common.NoopLogger{}) + require.NoError(t, err) + plugin := &OpenTelemetry{ + ServiceAddress: m.Address(), + Timeout: config.Duration(time.Second), + Headers: map[string]string{"test": "header1"}, + Attributes: map[string]string{"attr-key": "attr-val"}, + metricsConverter: metricsConverter, + grpcClientConn: m.GrpcClient(), + metricsServiceClient: otlpgrpc.NewMetricsClient(m.GrpcClient()), + } + + input := testutil.MustMetric( + "cpu_temp", + map[string]string{ + "foo": "bar", + "otel.library.name": "My Library Name", + "host.name": "potato", + }, + map[string]interface{}{ + "gauge": 87.332, + }, + time.Unix(0, 1622848686000000000)) + + err = plugin.Write([]telegraf.Metric{input}) + if err != nil { + // TODO not sure why the service returns this error, but the data arrives as required by the test + // rpc error: code = Internal desc = grpc: error while marshaling: proto: Marshal called with nil + if !strings.Contains(err.Error(), "proto: Marshal called with nil") { + assert.NoError(t, err) + } + } + + got := m.GotMetrics() + + expectJSON, err := otlp.NewJSONMetricsMarshaler().MarshalMetrics(expect) + require.NoError(t, err) + + gotJSON, err := otlp.NewJSONMetricsMarshaler().MarshalMetrics(got) + require.NoError(t, err) + + assert.JSONEq(t, string(expectJSON), string(gotJSON)) +} + +var _ otlpgrpc.MetricsServer = (*mockOtelService)(nil) + +type mockOtelService struct { + t *testing.T + listener net.Listener + grpcServer *grpc.Server + grpcClient *grpc.ClientConn + + metrics pdata.Metrics +} + +func 
newMockOtelService(t *testing.T) *mockOtelService { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + grpcServer := grpc.NewServer() + + mockOtelService := &mockOtelService{ + t: t, + listener: listener, + grpcServer: grpcServer, + } + + otlpgrpc.RegisterMetricsServer(grpcServer, mockOtelService) + go func() { assert.NoError(t, grpcServer.Serve(listener)) }() + + grpcClient, err := grpc.Dial(listener.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) + mockOtelService.grpcClient = grpcClient + + return mockOtelService +} + +func (m *mockOtelService) Cleanup() { + assert.NoError(m.t, m.grpcClient.Close()) + m.grpcServer.Stop() +} + +func (m *mockOtelService) GrpcClient() *grpc.ClientConn { + return m.grpcClient +} + +func (m *mockOtelService) GotMetrics() pdata.Metrics { + return m.metrics +} + +func (m *mockOtelService) Address() string { + return m.listener.Addr().String() +} + +func (m *mockOtelService) Export(ctx context.Context, request otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + m.metrics = request.Metrics().Clone() + ctxMetadata, ok := metadata.FromIncomingContext(ctx) + assert.Equal(m.t, []string{"header1"}, ctxMetadata.Get("test")) + assert.True(m.t, ok) + return otlpgrpc.MetricsResponse{}, nil +} diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index ae1e2a5362bc5..3d7fdf5cc5cc2 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -2,7 +2,6 @@ package opentsdb import ( "fmt" - "log" "math" "net" "net/url" @@ -23,22 +22,24 @@ var ( `%`, "-", "#", "-", "$", "-") - defaultHttpPath = "/api/put" + defaultHTTPPath = "/api/put" defaultSeparator = "_" ) type OpenTSDB struct { - Prefix string + Prefix string `toml:"prefix"` - Host string - Port int + Host string `toml:"host"` + Port int `toml:"port"` - HttpBatchSize int // deprecated httpBatchSize form in 1.8 - HttpPath string + HTTPBatchSize int `toml:"http_batch_size"` // deprecated httpBatchSize form in 1.8 + HTTPPath string `toml:"http_path"` - Debug bool + Debug bool `toml:"debug"` - Separator string + Separator string `toml:"separator"` + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -86,7 +87,7 @@ func (o *OpenTSDB) Connect() error { // Test Connection to OpenTSDB Server u, err := url.Parse(o.Host) if err != nil { - return fmt.Errorf("Error in parsing host url: %s", err.Error()) + return fmt.Errorf("error in parsing host url: %s", err.Error()) } uri := fmt.Sprintf("%s:%d", u.Host, o.Port) @@ -109,26 +110,26 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { u, err := url.Parse(o.Host) if err != nil { - return fmt.Errorf("Error in parsing host url: %s", err.Error()) + return fmt.Errorf("error in parsing host url: %s", err.Error()) } if u.Scheme == "" || u.Scheme == "tcp" { return o.WriteTelnet(metrics, u) } else if u.Scheme == "http" || u.Scheme == "https" { - return o.WriteHttp(metrics, u) + return o.WriteHTTP(metrics, u) } else { - return fmt.Errorf("Unknown scheme in host parameter.") + return fmt.Errorf("unknown scheme in host parameter") } } -func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { +func (o *OpenTSDB) WriteHTTP(metrics []telegraf.Metric, u *url.URL) error { http := openTSDBHttp{ Host: u.Host, Port: o.Port, Scheme: u.Scheme, User: u.User, - BatchSize: o.HttpBatchSize, - Path: o.HttpPath, + BatchSize: o.HTTPBatchSize, + Path: o.HTTPPath, Debug: o.Debug, } @@ -146,11 +147,11 @@ func (o *OpenTSDB) 
WriteHttp(metrics []telegraf.Metric, u *url.URL) error { continue } default: - log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value) + o.Log.Debugf("OpenTSDB does not support metric value: [%s] of type [%T].", value, value) continue } - metric := &HttpMetric{ + metric := &HTTPMetric{ Metric: sanitize(fmt.Sprintf("%s%s%s%s", o.Prefix, m.Name(), o.Separator, fieldName)), Tags: tags, @@ -164,11 +165,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { } } - if err := http.flush(); err != nil { - return err - } - - return nil + return http.flush() } func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { @@ -195,13 +192,13 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { continue } default: - log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value) + o.Log.Debugf("OpenTSDB does not support metric value: [%s] of type [%T].", value, value) continue } metricValue, buildError := buildValue(value) if buildError != nil { - log.Printf("E! OpenTSDB: %s\n", buildError.Error()) + o.Log.Errorf("OpenTSDB: %s", buildError.Error()) continue } @@ -234,9 +231,9 @@ func buildValue(v interface{}) (string, error) { var retv string switch p := v.(type) { case int64: - retv = IntToString(int64(p)) + retv = IntToString(p) case uint64: - retv = UIntToString(uint64(p)) + retv = UIntToString(p) case float64: retv = FloatToString(float64(p)) default: @@ -245,16 +242,16 @@ func buildValue(v interface{}) (string, error) { return retv, nil } -func IntToString(input_num int64) string { - return strconv.FormatInt(input_num, 10) +func IntToString(inputNum int64) string { + return strconv.FormatInt(inputNum, 10) } -func UIntToString(input_num uint64) string { - return strconv.FormatUint(input_num, 10) +func UIntToString(inputNum uint64) string { + return strconv.FormatUint(inputNum, 10) } -func FloatToString(input_num float64) string { - return strconv.FormatFloat(input_num, 'f', 6, 64) +func FloatToString(inputNum float64) string { + return strconv.FormatFloat(inputNum, 'f', 6, 64) } func (o *OpenTSDB) SampleConfig() string { @@ -279,7 +276,7 @@ func sanitize(value string) string { func init() { outputs.Add("opentsdb", func() telegraf.Output { return &OpenTSDB{ - HttpPath: defaultHttpPath, + HTTPPath: defaultHTTPPath, Separator: defaultSeparator, } }) diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index 4f971abb639aa..582a9bb85fc9a 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -6,14 +6,13 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/http/httputil" "net/url" ) -type HttpMetric struct { +type HTTPMetric struct { Metric string `json:"metric"` Timestamp int64 `json:"timestamp"` Value interface{} `json:"value"` @@ -68,7 +67,7 @@ func (r *requestBody) reset(debug bool) { r.empty = true } -func (r *requestBody) addMetric(metric *HttpMetric) error { +func (r *requestBody) addMetric(metric *HTTPMetric) error { if !r.empty { io.WriteString(r.w, ",") } @@ -92,7 +91,7 @@ func (r *requestBody) close() error { return nil } -func (o *openTSDBHttp) sendDataPoint(metric *HttpMetric) error { +func (o *openTSDBHttp) sendDataPoint(metric *HTTPMetric) error { if o.metricCounter == 0 { o.body.reset(o.Debug) } @@ -163,7 +162,7 @@ func (o *openTSDBHttp) flush() error { fmt.Printf("Received response\n%s\n\n", dump) } else { // Important so http client reuse 
connection for next request if need be. - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) } if resp.StatusCode/100 != 2 { diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go index 096337c5c6648..89748d055d9d5 100644 --- a/plugins/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -155,8 +155,8 @@ func BenchmarkHttpSend(b *testing.B) { Host: ts.URL, Port: port, Prefix: "", - HttpBatchSize: BatchSize, - HttpPath: "/api/put", + HTTPBatchSize: BatchSize, + HTTPPath: "/api/put", } b.ResetTimer() @@ -164,41 +164,38 @@ func BenchmarkHttpSend(b *testing.B) { o.Write(metrics) } } +func TestWriteIntegration(t *testing.T) { + t.Skip("Skip as OpenTSDB not running") -// func TestWrite(t *testing.T) { -// if testing.Short() { -// t.Skip("Skipping integration test in short mode") -// } - -// o := &OpenTSDB{ -// Host: testutil.GetLocalHost(), -// Port: 4242, -// Prefix: "prefix.test.", -// } - -// // Verify that we can connect to the OpenTSDB instance -// err := o.Connect() -// require.NoError(t, err) - -// // Verify that we can successfully write data to OpenTSDB -// err = o.Write(testutil.MockMetrics()) -// require.NoError(t, err) - -// // Verify positive and negative test cases of writing data -// metrics := testutil.MockMetrics() -// metrics = append(metrics, testutil.TestMetric(float64(1.0), -// "justametric.float")) -// metrics = append(metrics, testutil.TestMetric(int64(123456789), -// "justametric.int")) -// metrics = append(metrics, testutil.TestMetric(uint64(123456789012345), -// "justametric.uint")) -// metrics = append(metrics, testutil.TestMetric("Lorem Ipsum", -// "justametric.string")) -// metrics = append(metrics, testutil.TestMetric(float64(42.0), -// "justametric.anotherfloat")) -// metrics = append(metrics, testutil.TestMetric(float64(42.0), -// "metric w/ specialchars")) - -// err = o.Write(metrics) -// require.NoError(t, err) -// } + o := &OpenTSDB{ + Host: testutil.GetLocalHost(), + Port: 4242, + Prefix: "prefix.test.", + } + + // Verify that we can connect to the OpenTSDB instance + err := o.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to OpenTSDB + err = o.Write(testutil.MockMetrics()) + require.NoError(t, err) + + // Verify positive and negative test cases of writing data + metrics := testutil.MockMetrics() + metrics = append(metrics, testutil.TestMetric(float64(1.0), + "justametric.float")) + metrics = append(metrics, testutil.TestMetric(int64(123456789), + "justametric.int")) + metrics = append(metrics, testutil.TestMetric(uint64(123456789012345), + "justametric.uint")) + metrics = append(metrics, testutil.TestMetric("Lorem Ipsum", + "justametric.string")) + metrics = append(metrics, testutil.TestMetric(float64(42.0), + "justametric.anotherfloat")) + metrics = append(metrics, testutil.TestMetric(float64(42.0), + "metric w/ specialchars")) + + err = o.Write(metrics) + require.NoError(t, err) +} diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 9beaa062da1eb..844cf3f2d1790 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -14,7 +14,7 @@ all metrics on `/metrics` (default) to be polled by a Prometheus server. ## Prometheus format. When using the prometheus input, use the same value in ## both plugins to ensure metrics are round-tripped without modification. 
## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 53713a02ba4e6..9c54c2dade83a 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -23,7 +24,7 @@ import ( var ( defaultListen = ":9273" defaultPath = "/metrics" - defaultExpirationInterval = internal.Duration{Duration: 60 * time.Second} + defaultExpirationInterval = config.Duration(60 * time.Second) ) var sampleConfig = ` @@ -34,7 +35,7 @@ var sampleConfig = ` ## Prometheus format. When using the prometheus input, use the same value in ## both plugins to ensure metrics are round-tripped without modification. ## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 @@ -79,16 +80,16 @@ type Collector interface { } type PrometheusClient struct { - Listen string `toml:"listen"` - MetricVersion int `toml:"metric_version"` - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - IPRange []string `toml:"ip_range"` - ExpirationInterval internal.Duration `toml:"expiration_interval"` - Path string `toml:"path"` - CollectorsExclude []string `toml:"collectors_exclude"` - StringAsLabel bool `toml:"string_as_label"` - ExportTimestamp bool `toml:"export_timestamp"` + Listen string `toml:"listen"` + MetricVersion int `toml:"metric_version"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + IPRange []string `toml:"ip_range"` + ExpirationInterval config.Duration `toml:"expiration_interval"` + Path string `toml:"path"` + CollectorsExclude []string `toml:"collectors_exclude"` + StringAsLabel bool `toml:"string_as_label"` + ExportTimestamp bool `toml:"export_timestamp"` tlsint.ServerConfig Log telegraf.Logger `toml:"-"` @@ -132,14 +133,13 @@ func (p *PrometheusClient) Init() error { default: fallthrough case 1: - p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2") - p.collector = v1.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.Log) + p.collector = v1.NewCollector(time.Duration(p.ExpirationInterval), p.StringAsLabel, p.Log) err := registry.Register(p.collector) if err != nil { return err } case 2: - p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.ExportTimestamp) + p.collector = v2.NewCollector(time.Duration(p.ExpirationInterval), p.StringAsLabel, p.ExportTimestamp) err := registry.Register(p.collector) if err != nil { return err @@ -159,12 +159,16 @@ func (p *PrometheusClient) Init() error { authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, "prometheus", onAuthError) rangeHandler := internal.IPRangeHandler(ipRange, onError) promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) + landingPageHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Telegraf Output Plugin: Prometheus Client ")) + }) mux := 
http.NewServeMux() if p.Path == "" { - p.Path = "/" + p.Path = "/metrics" } mux.Handle(p.Path, authHandler(rangeHandler(promHandler))) + mux.Handle("/", authHandler(rangeHandler(landingPageHandler))) tlsConfig, err := p.TLSConfig() if err != nil { @@ -183,9 +187,8 @@ func (p *PrometheusClient) Init() error { func (p *PrometheusClient) listen() (net.Listener, error) { if p.server.TLSConfig != nil { return tls.Listen("tcp", p.Listen, p.server.TLSConfig) - } else { - return net.Listen("tcp", p.Listen) } + return net.Listen("tcp", p.Listen) } func (p *PrometheusClient) Connect() error { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index adf18c9f0f076..95fa97fb688b7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -2,9 +2,10 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "net/url" "strings" "testing" "time" @@ -260,7 +261,7 @@ rpc_duration_seconds_count 2693 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -391,7 +392,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -400,3 +401,29 @@ rpc_duration_seconds_count 2693 }) } } + +func TestLandingPage(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + output := PrometheusClient{ + Listen: ":0", + CollectorsExclude: []string{"process"}, + MetricVersion: 1, + Log: Logger, + } + expected := "Telegraf Output Plugin: Prometheus Client" + + err := output.Init() + require.NoError(t, err) + + err = output.Connect() + require.NoError(t, err) + + u, err := url.Parse(fmt.Sprintf("http://%s/", output.url.Host)) + resp, err := http.Get(u.String()) + require.NoError(t, err) + + actual, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, expected, strings.TrimSpace(string(actual))) +} diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 27be9103b28bd..c5ff76d4017a7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -321,7 +321,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -452,7 +452,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go index 7932bbc59f44d..a77f94d9ffc44 100644 --- a/plugins/outputs/prometheus_client/v1/collector.go +++ b/plugins/outputs/prometheus_client/v1/collector.go @@ -153,7 +153,6 @@ func CreateSampleID(tags map[string]string) 
SampleID { } func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { - for k := range sample.Labels { fam.LabelSet[k]++ } @@ -363,7 +362,6 @@ func (c *Collector) Add(metrics []telegraf.Metric) error { continue } c.addMetricFamily(point, sample, mname, sampleID) - } } } diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index b28a4deab1cc9..a12c17571124c 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -59,11 +59,10 @@ func NewCollector(expire time.Duration, stringsAsLabel bool, exportTimestamp boo } } -func (c *Collector) Describe(ch chan<- *prometheus.Desc) { +func (c *Collector) Describe(_ chan<- *prometheus.Desc) { // Sending no descriptor at all marks the Collector as "unchecked", // i.e. no checks will be performed at registration time, and the // Collector may yield any Metric it sees fit in its Collect method. - return } func (c *Collector) Collect(ch chan<- prometheus.Metric) { diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index 1738ca537bab0..bad1e44a0c1a1 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -2,7 +2,6 @@ package riemann import ( "fmt" - "log" "net/url" "os" "sort" @@ -11,20 +10,21 @@ import ( "github.com/amir/raidman" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) type Riemann struct { - URL string - TTL float32 - Separator string - MeasurementAsAttribute bool - StringAsState bool - TagKeys []string - Tags []string - DescriptionText string - Timeout internal.Duration + URL string `toml:"url"` + TTL float32 `toml:"ttl"` + Separator string `toml:"separator"` + MeasurementAsAttribute bool `toml:"measurement_as_attribute"` + StringAsState bool `toml:"string_as_state"` + TagKeys []string `toml:"tag_keys"` + Tags []string `toml:"tags"` + DescriptionText string `toml:"description_text"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` client *raidman.Client } @@ -63,12 +63,12 @@ var sampleConfig = ` ` func (r *Riemann) Connect() error { - parsed_url, err := url.Parse(r.URL) + parsedURL, err := url.Parse(r.URL) if err != nil { return err } - client, err := raidman.DialWithTimeout(parsed_url.Scheme, parsed_url.Host, r.Timeout.Duration) + client, err := raidman.DialWithTimeout(parsedURL.Scheme, parsedURL.Host, time.Duration(r.Timeout)) if err != nil { r.client = nil return err @@ -101,7 +101,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { if r.client == nil { if err := r.Connect(); err != nil { - return fmt.Errorf("Failed to (re)connect to Riemann: %s", err.Error()) + return fmt.Errorf("failed to (re)connect to Riemann: %s", err.Error()) } } @@ -109,14 +109,12 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { var events []*raidman.Event for _, m := range metrics { evs := r.buildRiemannEvents(m) - for _, ev := range evs { - events = append(events, ev) - } + events = append(events, evs...) 
} if err := r.client.SendMulti(events); err != nil { r.Close() - return fmt.Errorf("Failed to send riemann message: %s", err) + return fmt.Errorf("failed to send riemann message: %s", err) } return nil } @@ -145,18 +143,18 @@ func (r *Riemann) buildRiemannEvents(m telegraf.Metric) []*raidman.Event { Tags: r.tags(m.Tags()), } - switch value.(type) { + switch value := value.(type) { case string: // only send string metrics if explicitly enabled, skip otherwise if !r.StringAsState { - log.Printf("D! Riemann event states disabled, skipping metric value [%s]\n", value) + r.Log.Debugf("Riemann event states disabled, skipping metric value [%s]", value) continue } - event.State = value.(string) + event.State = value case int, int64, uint64, float32, float64: event.Metric = value default: - log.Printf("D! Riemann does not support metric value [%s]\n", value) + r.Log.Debugf("Riemann does not support metric value [%s]", value) continue } @@ -219,7 +217,7 @@ func (r *Riemann) tags(tags map[string]string) []string { func init() { outputs.Add("riemann", func() telegraf.Output { return &Riemann{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index 61b7b37965e71..b56fb33e114be 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -4,6 +4,8 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/amir/raidman" "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/require" @@ -12,7 +14,9 @@ import ( func TestAttributes(t *testing.T) { tags := map[string]string{"tag1": "value1", "tag2": "value2"} - r := &Riemann{} + r := &Riemann{ + Log: testutil.Logger{}, + } require.Equal(t, map[string]string{"tag1": "value1", "tag2": "value2"}, r.attributes("test", tags)) @@ -27,6 +31,7 @@ func TestAttributes(t *testing.T) { func TestService(t *testing.T) { r := &Riemann{ Separator: "/", + Log: testutil.Logger{}, } require.Equal(t, "test/value", r.service("test", "value")) @@ -41,6 +46,7 @@ func TestTags(t *testing.T) { // all tag values plus additional tag should be present r := &Riemann{ Tags: []string{"test"}, + Log: testutil.Logger{}, } require.Equal(t, []string{"test", "value1", "value2"}, @@ -67,10 +73,11 @@ func TestMetricEvents(t *testing.T) { MeasurementAsAttribute: false, DescriptionText: "metrics from telegraf", Tags: []string{"telegraf"}, + Log: testutil.Logger{}, } // build a single event - m, _ := metric.New( + m := metric.New( "test1", map[string]string{"tag1": "value1", "host": "abc123"}, map[string]interface{}{"value": 5.6}, @@ -95,7 +102,7 @@ func TestMetricEvents(t *testing.T) { require.Equal(t, expectedEvent, events[0]) // build 2 events - m, _ = metric.New( + m = metric.New( "test2", map[string]string{"host": "xyz987"}, map[string]interface{}{"point": 1}, @@ -126,10 +133,11 @@ func TestMetricEvents(t *testing.T) { func TestStateEvents(t *testing.T) { r := &Riemann{ MeasurementAsAttribute: true, + Log: testutil.Logger{}, } // string metrics will be skipped unless explicitly enabled - m, _ := metric.New( + m := metric.New( "test", map[string]string{"host": "host"}, map[string]interface{}{"value": "running"}, diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go index a1b140436430a..7fe80297de4d9 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -2,7 +2,6 @@ 
package riemann_legacy import ( "fmt" - "log" "os" "sort" "strings" @@ -12,12 +11,13 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" ) -const deprecationMsg = "E! Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." +const deprecationMsg = "Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." type Riemann struct { - URL string - Transport string - Separator string + URL string `toml:"url"` + Transport string `toml:"transport"` + Separator string `toml:"separator"` + Log telegraf.Logger `toml:"-"` client *raidman.Client } @@ -32,7 +32,7 @@ var sampleConfig = ` ` func (r *Riemann) Connect() error { - log.Printf(deprecationMsg) + r.Log.Error(deprecationMsg) c, err := raidman.Dial(r.Transport, r.URL) if err != nil { @@ -62,7 +62,7 @@ func (r *Riemann) Description() string { } func (r *Riemann) Write(metrics []telegraf.Metric) error { - log.Printf(deprecationMsg) + r.Log.Error(deprecationMsg) if len(metrics) == 0 { return nil } @@ -70,23 +70,20 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { if r.client == nil { err := r.Connect() if err != nil { - return fmt.Errorf("FAILED to (re)connect to Riemann. Error: %s\n", err) + return fmt.Errorf("failed to (re)connect to Riemann, error: %s", err) } } var events []*raidman.Event for _, p := range metrics { evs := buildEvents(p, r.Separator) - for _, ev := range evs { - events = append(events, ev) - } + events = append(events, evs...) } var senderr = r.client.SendMulti(events) if senderr != nil { r.Close() // always returns nil - return fmt.Errorf("FAILED to send riemann message (will try to reconnect). Error: %s\n", - senderr) + return fmt.Errorf("failed to send riemann message (will try to reconnect), error: %s", senderr) } return nil @@ -110,9 +107,9 @@ func buildEvents(p telegraf.Metric, s string) []*raidman.Event { Service: serviceName(s, p.Name(), p.Tags(), fieldName), } - switch value.(type) { + switch value := value.(type) { case string: - event.State = value.(string) + event.State = value default: event.Metric = value } @@ -141,7 +138,7 @@ func serviceName(s string, n string, t map[string]string, f string) string { tagStrings = append(tagStrings, t[tagName]) } } - var tagString string = strings.Join(tagStrings, s) + var tagString = strings.Join(tagStrings, s) if tagString != "" { serviceStrings = append(serviceStrings, tagString) } diff --git a/plugins/outputs/riemann_legacy/riemann_test.go b/plugins/outputs/riemann_legacy/riemann_test.go index e57cbb43cc2c4..6450956ff1275 100644 --- a/plugins/outputs/riemann_legacy/riemann_test.go +++ b/plugins/outputs/riemann_legacy/riemann_test.go @@ -8,9 +8,7 @@ import ( ) func TestConnectAndWrite(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping legacy integration test") url := testutil.GetLocalHost() + ":5555" diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md new file mode 100644 index 0000000000000..f21159c6426e0 --- /dev/null +++ b/plugins/outputs/sensu/README.md @@ -0,0 +1,97 @@ +# Sensu Go Output Plugin + +This plugin writes metrics events to [Sensu Go](https://sensu.io) via its +HTTP events API. + +### Configuration: + +```toml +[[outputs.sensu]] + ## BACKEND API URL is the Sensu Backend API root URL to send metrics to + ## (protocol, host, and port only). 
The output plugin will automatically + ## append the corresponding backend API path + ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). + ## + ## Backend Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## AGENT API URL is the Sensu Agent API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the corresponding agent API path (/events). + ## + ## Agent API Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## NOTE: if backend_api_url, agent_api_url, and api_key are all set, the output + ## plugin will use backend_api_url. If neither backend_api_url nor agent_api_url + ## is provided, the output plugin will default to using an agent_api_url of + ## http://127.0.0.1:3031 + ## + # backend_api_url = "http://127.0.0.1:8080" + # agent_api_url = "http://127.0.0.1:3031" + + ## API KEY is the Sensu Backend API token + ## Generate a new API token via: + ## + ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities + ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf + ## $ sensuctl user create telegraf --group telegraf --password REDACTED + ## $ sensuctl api-key grant telegraf + ## + ## For more information on Sensu RBAC profiles & API tokens, please visit: + ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ + ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ + ## + # api_key = "${SENSU_API_KEY}" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## Sensu Event details + ## + ## Below are the event details to be sent to Sensu. The main portions of the + ## event are the check, entity, and metrics specifications. For more information + ## on Sensu events and its components, please visit: + ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events + ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks + ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities + ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics + ## + ## Check specification + ## The check name is the name to give the Sensu check associated with the event + ## created. This maps to check.metadata.name in the event. + [outputs.sensu.check] + name = "telegraf" + + ## Entity specification + ## Configure the entity name and namespace, if necessary. This will be part of + ## the entity.metadata in the event. + ## + ## NOTE: if the output plugin is configured to send events to a + ## backend_api_url and entity_name is not set, the value returned by + ## os.Hostname() will be used; if the output plugin is configured to send + ## events to an agent_api_url, entity_name and entity_namespace are not used. 
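To make the URL-selection rules above concrete, here is a minimal standalone sketch of the same precedence logic (backend URL wins and gets the namespaced path, otherwise the agent URL, otherwise the default agent address). The function name `resolveEventsEndpoint` is illustrative only; the plugin's actual implementation is `setEndpointURL` in `sensu.go` below.

```go
package example

import (
	"net/url"
	"path"
)

// resolveEventsEndpoint restates the endpoint precedence described above:
// backend_api_url > agent_api_url > default agent URL.
func resolveEventsEndpoint(backendURL, agentURL, namespace string) (string, error) {
	base := agentURL
	suffix := "/events"
	if backendURL != "" {
		if namespace == "" {
			namespace = "default" // matches the plugin's fallback namespace
		}
		base = backendURL
		suffix = "/api/core/v2/namespaces/" + namespace + "/events"
	}
	if base == "" {
		base = "http://127.0.0.1:3031" // default agent API
	}
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	u.Path = path.Join(u.Path, suffix)
	return u.String(), nil
}
```

For example, `resolveEventsEndpoint("http://127.0.0.1:8080", "", "ops")` yields `http://127.0.0.1:8080/api/core/v2/namespaces/ops/events`, matching the expectations in `sensu_test.go` further down.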
+ # [outputs.sensu.entity] + # name = "server-01" + # namespace = "default" + + ## Metrics specification + ## Configure the tags for the metrics that are sent as part of the Sensu event + # [outputs.sensu.tags] + # source = "telegraf" + + ## Configure the handler(s) for processing the provided metrics + # [outputs.sensu.metrics] + # handlers = ["influxdb","elasticsearch"] +``` diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go new file mode 100644 index 0000000000000..3cd8b2274e52a --- /dev/null +++ b/plugins/outputs/sensu/sensu.go @@ -0,0 +1,512 @@ +package sensu + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math" + "net/http" + "net/url" + "os" + "path" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultURL = "http://127.0.0.1:3031" + defaultClientTimeout = 5 * time.Second + defaultContentType = "application/json; charset=utf-8" +) + +type OutputMetadata struct { + Name string `json:"name"` +} + +type OutputEntity struct { + Metadata *OutputMetadata `json:"metadata"` +} + +type OutputCheck struct { + Metadata *OutputMetadata `json:"metadata"` + Status int `json:"status"` + Output string `json:"output"` + Issued int64 `json:"issued"` + OutputMetricHandlers []string `json:"output_metric_handlers"` +} + +type OutputMetrics struct { + Handlers []string `json:"handlers"` + Metrics []*OutputMetric `json:"points"` +} + +type OutputMetric struct { + Name string `json:"name"` + Tags []*OutputTag `json:"tags"` + Value interface{} `json:"value"` + Timestamp int64 `json:"timestamp"` +} + +type OutputTag struct { + Name string `json:"name"` + Value string `json:"value"` +} + +type OutputEvent struct { + Entity *OutputEntity `json:"entity,omitempty"` + Check *OutputCheck `json:"check"` + Metrics *OutputMetrics `json:"metrics"` + Timestamp int64 `json:"timestamp"` +} + +type SensuEntity struct { + Name *string `toml:"name"` + Namespace *string `toml:"namespace"` +} + +type SensuCheck struct { + Name *string `toml:"name"` +} + +type SensuMetrics struct { + Handlers []string `toml:"handlers"` +} + +type Sensu struct { + APIKey *string `toml:"api_key"` + AgentAPIURL *string `toml:"agent_api_url"` + BackendAPIURL *string `toml:"backend_api_url"` + Entity *SensuEntity `toml:"entity"` + Tags map[string]string `toml:"tags"` + Metrics *SensuMetrics `toml:"metrics"` + Check *SensuCheck `toml:"check"` + + Timeout config.Duration `toml:"timeout"` + ContentEncoding string `toml:"content_encoding"` + + EndpointURL string + OutEntity *OutputEntity + + Log telegraf.Logger `toml:"-"` + + tls.ClientConfig + client *http.Client +} + +var sampleConfig = ` + ## BACKEND API URL is the Sensu Backend API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the corresponding backend API path + ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). + ## + ## Backend Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## AGENT API URL is the Sensu Agent API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the corresponding agent API path (/events). 
+ ## + ## Agent API Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## NOTE: if backend_api_url, agent_api_url, and api_key are all set, the output + ## plugin will use backend_api_url. If neither backend_api_url nor agent_api_url + ## is provided, the output plugin will default to using an agent_api_url of + ## http://127.0.0.1:3031 + ## + # backend_api_url = "http://127.0.0.1:8080" + # agent_api_url = "http://127.0.0.1:3031" + + ## API KEY is the Sensu Backend API token + ## Generate a new API token via: + ## + ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities + ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf + ## $ sensuctl user create telegraf --group telegraf --password REDACTED + ## $ sensuctl api-key grant telegraf + ## + ## For more information on Sensu RBAC profiles & API tokens, please visit: + ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ + ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ + ## + # api_key = "${SENSU_API_KEY}" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## Sensu Event details + ## + ## Below are the event details to be sent to Sensu. The main portions of the + ## event are the check, entity, and metrics specifications. For more information + ## on Sensu events and its components, please visit: + ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events + ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks + ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities + ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics + ## + ## Check specification + ## The check name is the name to give the Sensu check associated with the event + ## created. This maps to check.metadata.name in the event. + [outputs.sensu.check] + name = "telegraf" + + ## Entity specification + ## Configure the entity name and namespace, if necessary. This will be part of + ## the entity.metadata in the event. + ## + ## NOTE: if the output plugin is configured to send events to a + ## backend_api_url and entity_name is not set, the value returned by + ## os.Hostname() will be used; if the output plugin is configured to send + ## events to an agent_api_url, entity_name and entity_namespace are not used. 
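Since the hostname fallback in the NOTE above is easy to misread, here is a small sketch of the rule in isolation. The helper `entityNameFor` is illustrative and not part of the plugin; the real logic lives in `setEntity` further down, which is only consulted when events go to a backend API.

```go
package example

import (
	"fmt"
	"os"
)

// entityNameFor mirrors the fallback described above: use the configured
// entity name when present, otherwise fall back to the host's name. The
// agent API ignores the entity entirely, so a caller would only invoke
// this when writing to a backend_api_url.
func entityNameFor(configured *string) (string, error) {
	if configured != nil {
		return *configured, nil
	}
	hostname, err := os.Hostname()
	if err != nil {
		return "", fmt.Errorf("resolving hostname failed: %v", err)
	}
	return hostname, nil
}
```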
+ # [outputs.sensu.entity] + # name = "server-01" + # namespace = "default" + + ## Metrics specification + ## Configure the tags for the metrics that are sent as part of the Sensu event + # [outputs.sensu.tags] + # source = "telegraf" + + ## Configure the handler(s) for processing the provided metrics + # [outputs.sensu.metrics] + # handlers = ["influxdb","elasticsearch"] +` + +// Description provides a description of the plugin +func (s *Sensu) Description() string { + return "Send aggregate metrics to Sensu Monitor" +} + +// SampleConfig provides a sample configuration for the plugin +func (s *Sensu) SampleConfig() string { + return sampleConfig +} + +func (s *Sensu) createClient() (*http.Client, error) { + tlsCfg, err := s.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: time.Duration(s.Timeout), + } + + return client, nil +} + +func (s *Sensu) Connect() error { + err := s.setEndpointURL() + if err != nil { + return err + } + + err = s.setEntity() + if err != nil { + return err + } + + client, err := s.createClient() + if err != nil { + return err + } + + s.client = client + + return nil +} + +func (s *Sensu) Close() error { + s.client.CloseIdleConnections() + return nil +} + +func (s *Sensu) Write(metrics []telegraf.Metric) error { + var points []*OutputMetric + for _, metric := range metrics { + // Add tags from config to each metric point + tagList := make([]*OutputTag, 0, len(s.Tags)+len(metric.TagList())) + for name, value := range s.Tags { + tag := &OutputTag{ + Name: name, + Value: value, + } + tagList = append(tagList, tag) + } + for _, tagSet := range metric.TagList() { + tag := &OutputTag{ + Name: tagSet.Key, + Value: tagSet.Value, + } + tagList = append(tagList, tag) + } + + // Get all valid numeric values, convert to float64 + for _, fieldSet := range metric.FieldList() { + key := fieldSet.Key + value := getFloat(fieldSet.Value) + // JSON does not support these special values + if math.IsInf(value, 1) { + s.Log.Debugf("metric %s returned positive infinity, setting value to %f", key, math.MaxFloat64) + value = math.MaxFloat64 + } + if math.IsInf(value, -1) { + s.Log.Debugf("metric %s returned negative infinity, setting value to %f", key, -math.MaxFloat64) + value = -math.MaxFloat64 + } + if math.IsNaN(value) { + s.Log.Debugf("metric %s returned NaN (not a number), skipping", key) + continue + } + + point := &OutputMetric{ + Name: metric.Name() + "." 
+ key, + Tags: tagList, + Timestamp: metric.Time().Unix(), + Value: value, + } + points = append(points, point) + } + } + + reqBody, err := s.encodeToJSON(points) + if err != nil { + return err + } + + return s.write(reqBody) +} + +func (s *Sensu) write(reqBody []byte) error { + var reqBodyBuffer io.Reader = bytes.NewBuffer(reqBody) + method := http.MethodPost + + if s.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reqBodyBuffer) + if err != nil { + return err + } + defer rc.Close() + reqBodyBuffer = rc + } + + req, err := http.NewRequest(method, s.EndpointURL, reqBodyBuffer) + if err != nil { + return err + } + + req.Header.Set("User-Agent", internal.ProductToken()) + + req.Header.Set("Content-Type", defaultContentType) + if s.ContentEncoding == "gzip" { + req.Header.Set("Content-Encoding", "gzip") + } + + if s.APIKey != nil { + req.Header.Set("Authorization", "Key "+*s.APIKey) + } + + resp, err := s.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + bodyData, err := io.ReadAll(resp.Body) + if err != nil { + s.Log.Debugf("Couldn't read response body: %v", err) + } + s.Log.Debugf("Failed to write, response: %v", string(bodyData)) + if resp.StatusCode < 400 || resp.StatusCode > 499 { + return fmt.Errorf("when writing to [%s] received status code: %d", s.EndpointURL, resp.StatusCode) + } + } + + return nil +} + +// Resolves the event write endpoint +func (s *Sensu) setEndpointURL() error { + var ( + endpointURL string + pathSuffix string + ) + + if s.BackendAPIURL != nil { + endpointURL = *s.BackendAPIURL + namespace := "default" + if s.Entity != nil && s.Entity.Namespace != nil { + namespace = *s.Entity.Namespace + } + pathSuffix = "/api/core/v2/namespaces/" + namespace + "/events" + } else if s.AgentAPIURL != nil { + endpointURL = *s.AgentAPIURL + pathSuffix = "/events" + } + + if len(endpointURL) == 0 { + s.Log.Debugf("no backend or agent API URL provided, falling back to default agent API URL %s", defaultURL) + endpointURL = defaultURL + pathSuffix = "/events" + } + + u, err := url.Parse(endpointURL) + if err != nil { + return err + } + + u.Path = path.Join(u.Path, pathSuffix) + s.EndpointURL = u.String() + + return nil +} + +func (s *Sensu) Init() error { + if len(s.ContentEncoding) != 0 { + validEncoding := []string{"identity", "gzip"} + if !choice.Contains(s.ContentEncoding, validEncoding) { + return fmt.Errorf("unsupported content_encoding [%q] specified", s.ContentEncoding) + } + } + + if s.BackendAPIURL != nil && s.APIKey == nil { + return fmt.Errorf("backend_api_url [%q] specified, but no API Key provided", *s.BackendAPIURL) + } + + return nil +} + +func init() { + outputs.Add("sensu", func() telegraf.Output { + // Default configuration values + + // make a string from the defaultURL const + agentAPIURL := defaultURL + + return &Sensu{ + AgentAPIURL: &agentAPIURL, + Timeout: config.Duration(defaultClientTimeout), + ContentEncoding: "identity", + } + }) +} + +func (s *Sensu) encodeToJSON(metricPoints []*OutputMetric) ([]byte, error) { + timestamp := time.Now().Unix() + + check, err := s.getCheck(metricPoints) + if err != nil { + return []byte{}, err + } + + output, err := json.Marshal(&OutputEvent{ + Entity: s.OutEntity, + Check: check, + Metrics: &OutputMetrics{ + Handlers: s.getHandlers(), + Metrics: metricPoints, + }, + Timestamp: timestamp, + }) + + return output, err +} + +// Constructs the entity payload +// Throws when no entity name is provided and the hostname cannot be resolved +func 
(s *Sensu) setEntity() error { + if s.BackendAPIURL != nil { + var entityName string + if s.Entity != nil && s.Entity.Name != nil { + entityName = *s.Entity.Name + } else { + defaultHostname, err := os.Hostname() + if err != nil { + return fmt.Errorf("resolving hostname failed: %v", err) + } + entityName = defaultHostname + } + + s.OutEntity = &OutputEntity{ + Metadata: &OutputMetadata{ + Name: entityName, + }, + } + return nil + } + s.OutEntity = &OutputEntity{} + return nil +} + +// Constructs the check payload +// Throws if check name is not provided +func (s *Sensu) getCheck(metricPoints []*OutputMetric) (*OutputCheck, error) { + count := len(metricPoints) + + if s.Check == nil || s.Check.Name == nil { + return &OutputCheck{}, fmt.Errorf("missing check name") + } + + return &OutputCheck{ + Metadata: &OutputMetadata{ + Name: *s.Check.Name, + }, + Status: 0, // Always OK + Issued: time.Now().Unix(), + Output: "Telegraf agent processed " + strconv.Itoa(count) + " metrics", + OutputMetricHandlers: s.getHandlers(), + }, nil +} + +func (s *Sensu) getHandlers() []string { + if s.Metrics == nil || s.Metrics.Handlers == nil { + return []string{} + } + return s.Metrics.Handlers +} + +func getFloat(unk interface{}) float64 { + switch i := unk.(type) { + case float64: + return i + case float32: + return float64(i) + case int64: + return float64(i) + case int32: + return float64(i) + case int: + return float64(i) + case uint64: + return float64(i) + case uint32: + return float64(i) + case uint: + return float64(i) + default: + return math.NaN() + } +} diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go new file mode 100644 index 0000000000000..e7a272ed5e149 --- /dev/null +++ b/plugins/outputs/sensu/sensu_test.go @@ -0,0 +1,210 @@ +package sensu + +import ( + "encoding/json" + "fmt" + "io" + "math" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/testutil" + corev2 "github.com/sensu/sensu-go/api/core/v2" + "github.com/stretchr/testify/require" +) + +func TestResolveEventEndpointUrl(t *testing.T) { + agentAPIURL := "http://127.0.0.1:3031" + backendAPIURL := "http://127.0.0.1:8080" + entityNamespace := "test-namespace" + emptyString := "" + tests := []struct { + name string + plugin *Sensu + expectedEndpointURL string + }{ + { + name: "agent event endpoint", + plugin: &Sensu{ + AgentAPIURL: &agentAPIURL, + Log: testutil.Logger{}, + }, + expectedEndpointURL: "http://127.0.0.1:3031/events", + }, + { + name: "backend event endpoint with default namespace", + plugin: &Sensu{ + AgentAPIURL: &agentAPIURL, + BackendAPIURL: &backendAPIURL, + Log: testutil.Logger{}, + }, + expectedEndpointURL: "http://127.0.0.1:8080/api/core/v2/namespaces/default/events", + }, + { + name: "backend event endpoint with namespace declared", + plugin: &Sensu{ + AgentAPIURL: &agentAPIURL, + BackendAPIURL: &backendAPIURL, + Entity: &SensuEntity{ + Namespace: &entityNamespace, + }, + Log: testutil.Logger{}, + }, + expectedEndpointURL: "http://127.0.0.1:8080/api/core/v2/namespaces/test-namespace/events", + }, + { + name: "agent event endpoint due to empty AgentAPIURL", + plugin: &Sensu{ + AgentAPIURL: &emptyString, + Log: testutil.Logger{}, + }, + expectedEndpointURL: "http://127.0.0.1:3031/events", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.setEndpointURL() + require.Equal(t, err, error(nil)) + require.Equal(t, 
tt.expectedEndpointURL, tt.plugin.EndpointURL) + }) + } +} + +func TestConnectAndWrite(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + testURL := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + testAPIKey := "a0b1c2d3-e4f5-g6h7-i8j9-k0l1m2n3o4p5" + testCheck := "telegraf" + testEntity := "entity1" + testNamespace := "default" + testHandler := "influxdb" + testTagName := "myTagName" + testTagValue := "myTagValue" + expectedAuthHeader := fmt.Sprintf("Key %s", testAPIKey) + expectedURL := fmt.Sprintf("/api/core/v2/namespaces/%s/events", testNamespace) + expectedPointName := "cpu" + expectedPointValue := float64(42) + + plugin := &Sensu{ + AgentAPIURL: nil, + BackendAPIURL: &testURL, + APIKey: &testAPIKey, + Check: &SensuCheck{ + Name: &testCheck, + }, + Entity: &SensuEntity{ + Name: &testEntity, + Namespace: &testNamespace, + }, + Metrics: &SensuMetrics{ + Handlers: []string{testHandler}, + }, + Tags: map[string]string{testTagName: testTagValue}, + Log: testutil.Logger{}, + } + + t.Run("connect", func(t *testing.T) { + err := plugin.Connect() + require.NoError(t, err) + }) + + t.Run("write", func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, expectedURL, r.URL.String()) + require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) + // let's make sure what we received is a valid Sensu event that contains all of the expected data + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + receivedEvent := &corev2.Event{} + err = json.Unmarshal(body, receivedEvent) + require.NoError(t, err) + require.Equal(t, testCheck, receivedEvent.Check.Name) + require.Equal(t, testEntity, receivedEvent.Entity.Name) + require.NotEmpty(t, receivedEvent.Metrics) + require.Equal(t, true, choice.Contains(testHandler, receivedEvent.Metrics.Handlers)) + require.NotEmpty(t, receivedEvent.Metrics.Points) + pointFound := false + tagFound := false + for _, p := range receivedEvent.Metrics.Points { + if p.Name == expectedPointName+".value" && p.Value == expectedPointValue { + pointFound = true + require.NotEmpty(t, p.Tags) + for _, t := range p.Tags { + if t.Name == testTagName && t.Value == testTagValue { + tagFound = true + } + } + } + } + require.Equal(t, true, pointFound) + require.Equal(t, true, tagFound) + w.WriteHeader(http.StatusCreated) + }) + err := plugin.Write([]telegraf.Metric{testutil.TestMetric(expectedPointValue, expectedPointName)}) + require.NoError(t, err) + }) +} + +func TestGetFloat(t *testing.T) { + tests := []struct { + name string + value interface{} + expectedReturn float64 + }{ + { + name: "getfloat with float64", + value: float64(42), + expectedReturn: 42, + }, + { + name: "getfloat with float32", + value: float32(42), + expectedReturn: 42, + }, + { + name: "getfloat with int64", + value: int64(42), + expectedReturn: 42, + }, + { + name: "getfloat with int32", + value: int32(42), + expectedReturn: 42, + }, + { + name: "getfloat with int", + value: int(42), + expectedReturn: 42, + }, + { + name: "getfloat with uint64", + value: uint64(42), + expectedReturn: 42, + }, + { + name: "getfloat with uint32", + value: uint32(42), + expectedReturn: 42, + }, + { + name: "getfloat with uint", + value: uint(42), + expectedReturn: 42, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expectedReturn, getFloat(tt.value)) + }) + } + // Since math.NaN() == math.NaN() returns false + t.Run("getfloat NaN special case", func(t 
*testing.T) { + f := getFloat("42") + require.True(t, math.IsNaN(f)) + }) +} diff --git a/plugins/outputs/signalfx/README.md b/plugins/outputs/signalfx/README.md new file mode 100644 index 0000000000000..00b39cf30e229 --- /dev/null +++ b/plugins/outputs/signalfx/README.md @@ -0,0 +1,23 @@ +# SignalFx Output Plugin + +The SignalFx output plugin sends metrics to [SignalFx](https://docs.signalfx.com/en/latest/). + +### Configuration +```toml +[[outputs.signalfx]] + ## SignalFx Org Access Token + access_token = "my-secret-token" + + ## The SignalFx realm that your organization resides in + signalfx_realm = "us9" # Required if ingest_url is not set + + ## You can optionally provide a custom ingest url instead of the + ## signalfx_realm option above if you are using a gateway or proxy + ## instance. This option takes precedence over signalfx_realm. + ingest_url = "https://my-custom-ingest/" + + ## Event typed metrics are omitted by default; + ## if you require an event typed metric you must specify the + ## metric name in the following list. + included_event_names = ["plugin.metric_name"] +``` diff --git a/plugins/outputs/signalfx/signalfx.go b/plugins/outputs/signalfx/signalfx.go new file mode 100644 index 0000000000000..d8452d7b7ffec --- /dev/null +++ b/plugins/outputs/signalfx/signalfx.go @@ -0,0 +1,255 @@ +package signalfx + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/signalfx/golib/v3/datapoint" + "github.com/signalfx/golib/v3/datapoint/dpsink" + "github.com/signalfx/golib/v3/event" + "github.com/signalfx/golib/v3/sfxclient" +) + +// init initializes the plugin context +func init() { + outputs.Add("signalfx", func() telegraf.Output { + return NewSignalFx() + }) +} + +// SignalFx plugin context +type SignalFx struct { + AccessToken string `toml:"access_token"` + SignalFxRealm string `toml:"signalfx_realm"` + IngestURL string `toml:"ingest_url"` + IncludedEventNames []string `toml:"included_event_names"` + + Log telegraf.Logger `toml:"-"` + + includedEventSet map[string]bool + client dpsink.Sink + + ctx context.Context + cancel context.CancelFunc +} + +var sampleConfig = ` + ## SignalFx Org Access Token + access_token = "my-secret-token" + + ## The SignalFx realm that your organization resides in + signalfx_realm = "us9" # Required if ingest_url is not set + + ## You can optionally provide a custom ingest url instead of the + ## signalfx_realm option above if you are using a gateway or proxy + ## instance. This option takes precedence over signalfx_realm. + ingest_url = "https://my-custom-ingest/" + + ## Event typed metrics are omitted by default; + ## if you require an event typed metric you must specify the + ## metric name in the following list. 
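Because the realm-versus-ingest-URL precedence above determines where datapoints and events are POSTed, a compact sketch of the resulting endpoint construction may help. The helper name `endpointsFor` is illustrative; the plugin's own `ingestURLForRealm` and `...ForIngestURL` helpers appear further down in `signalfx.go`.

```go
package example

import (
	"fmt"
	"strings"
)

// endpointsFor shows how a realm expands into the ingest base URL and how
// the datapoint and event endpoints hang off whichever base wins; a custom
// ingestURL, when set, takes precedence over the realm.
func endpointsFor(realm, ingestURL string) (datapointEP, eventEP string) {
	base := ingestURL
	if base == "" {
		base = fmt.Sprintf("https://ingest.%s.signalfx.com", realm)
	}
	base = strings.TrimRight(base, "/")
	return base + "/v2/datapoint", base + "/v2/event"
}
```

With `realm = "us9"` and no custom ingest URL, this yields `https://ingest.us9.signalfx.com/v2/datapoint` and `.../v2/event`, matching the endpoint helpers below.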
+ included_event_names = ["plugin.metric_name"] +` + +// GetMetricType returns the SignalFx datapoint type equivalent to a telegraf ValueType +func GetMetricType(mtype telegraf.ValueType) (metricType datapoint.MetricType) { + switch mtype { + case telegraf.Counter: + metricType = datapoint.Counter + case telegraf.Gauge: + metricType = datapoint.Gauge + case telegraf.Summary: + metricType = datapoint.Gauge + case telegraf.Histogram: + metricType = datapoint.Gauge + case telegraf.Untyped: + metricType = datapoint.Gauge + default: + metricType = datapoint.Gauge + } + return metricType +} + +// NewSignalFx - returns a new context for the SignalFx output plugin +func NewSignalFx() *SignalFx { + ctx, cancel := context.WithCancel(context.Background()) + return &SignalFx{ + AccessToken: "", + SignalFxRealm: "", + IngestURL: "", + IncludedEventNames: []string{""}, + ctx: ctx, + cancel: cancel, + client: sfxclient.NewHTTPSink(), + } +} + +// Description returns a description for the plugin +func (s *SignalFx) Description() string { + return "Send metrics and events to SignalFx" +} + +// SampleConfig returns the sample configuration for the plugin +func (s *SignalFx) SampleConfig() string { + return sampleConfig +} + +// Connect establishes a connection to SignalFx +func (s *SignalFx) Connect() error { + client := s.client.(*sfxclient.HTTPSink) + client.AuthToken = s.AccessToken + + if s.IngestURL != "" { + client.DatapointEndpoint = datapointEndpointForIngestURL(s.IngestURL) + client.EventEndpoint = eventEndpointForIngestURL(s.IngestURL) + } else if s.SignalFxRealm != "" { + client.DatapointEndpoint = datapointEndpointForRealm(s.SignalFxRealm) + client.EventEndpoint = eventEndpointForRealm(s.SignalFxRealm) + } else { + return errors.New("signalfx_realm or ingest_url must be configured") + } + + return nil +} + +// Close closes any connections to SignalFx +func (s *SignalFx) Close() error { + s.cancel() + s.client.(*sfxclient.HTTPSink).Client.CloseIdleConnections() + return nil +} + +func (s *SignalFx) ConvertToSignalFx(metrics []telegraf.Metric) ([]*datapoint.Datapoint, []*event.Event) { + var dps []*datapoint.Datapoint + var events []*event.Event + + for _, metric := range metrics { + s.Log.Debugf("Processing the following measurement: %v", metric) + var timestamp = metric.Time() + + metricType := GetMetricType(metric.Type()) + for field, val := range metric.Fields() { + // Copy the metric tags because they are meant to be treated as + // immutable + var metricDims = metric.Tags() + + // Generate the metric name + metricName := getMetricName(metric.Name(), field) + + // Get the metric value as a datapoint value + if metricValue, err := datapoint.CastMetricValueWithBool(val); err == nil { + var dp = datapoint.New(metricName, + metricDims, + metricValue.(datapoint.Value), + metricType, + timestamp) + + s.Log.Debugf("Datapoint: %v", dp.String()) + + dps = append(dps, dp) + } else { + // Skip if it's not an explicitly included event + if !s.isEventIncluded(metricName) { + continue + } + + // We've already type checked field, so set property with value + metricProps := map[string]interface{}{"message": val} + var ev = event.NewWithProperties(metricName, + event.AGENT, + metricDims, + metricProps, + timestamp) + + s.Log.Debugf("Event: %v", ev.String()) + + events = append(events, ev) + } + } + } + + return dps, events +} + +// Write is the callback for writing metrics +func (s *SignalFx) Write(metrics []telegraf.Metric) error { + dps, events := s.ConvertToSignalFx(metrics) + + if len(dps) > 0 { + err := 
s.client.AddDatapoints(s.ctx, dps) + if err != nil { + return err + } + } + + if len(events) > 0 { + if err := s.client.AddEvents(s.ctx, events); err != nil { + // If events error out but we successfully sent some datapoints, + // don't return an error so that it won't ever retry -- that way we + // don't send the same datapoints twice. + if len(dps) == 0 { + return err + } + s.Log.Errorf("Failed to send SignalFx event: %v", err) + } + } + + return nil +} + +// isEventIncluded - checks whether a metric name for an event was put on the whitelist +func (s *SignalFx) isEventIncluded(name string) bool { + if s.includedEventSet == nil { + s.includedEventSet = make(map[string]bool, len(s.IncludedEventNames)) + for _, include := range s.IncludedEventNames { + s.includedEventSet[include] = true + } + } + return s.includedEventSet[name] +} + +// getMetricName combines telegraf fields and tags into a full metric name +func getMetricName(metric string, field string) string { + name := metric + + // Include field in metric name when it adds to the metric name + if field != "value" { + name = fmt.Sprintf("%s.%s", name, field) + } + + return name +} + +// ingestURLForRealm returns the base ingest URL for a particular SignalFx +// realm +func ingestURLForRealm(realm string) string { + return fmt.Sprintf("https://ingest.%s.signalfx.com", realm) +} + +// datapointEndpointForRealm returns the endpoint to which datapoints should be +// POSTed for a particular realm. +func datapointEndpointForRealm(realm string) string { + return datapointEndpointForIngestURL(ingestURLForRealm(realm)) +} + +// datapointEndpointForIngestURL returns the endpoint to which datapoints should be +// POSTed for a particular ingest base URL. +func datapointEndpointForIngestURL(ingestURL string) string { + return strings.TrimRight(ingestURL, "/") + "/v2/datapoint" +} + +// eventEndpointForRealm returns the endpoint to which events should be +// POSTed for a particular realm. +func eventEndpointForRealm(realm string) string { + return eventEndpointForIngestURL(ingestURLForRealm(realm)) +} + +// eventEndpointForIngestURL returns the endpoint to which events should be +// POSTed for a particular ingest base URL. +func eventEndpointForIngestURL(ingestURL string) string { + return strings.TrimRight(ingestURL, "/") + "/v2/event" +} diff --git a/plugins/outputs/signalfx/signalfx_test.go b/plugins/outputs/signalfx/signalfx_test.go new file mode 100644 index 0000000000000..d21cff82f62a2 --- /dev/null +++ b/plugins/outputs/signalfx/signalfx_test.go @@ -0,0 +1,699 @@ +package signalfx + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/testutil" + "github.com/signalfx/golib/v3/datapoint" + "github.com/signalfx/golib/v3/event" + "github.com/stretchr/testify/require" +) + +type sink struct { + dps []*datapoint.Datapoint + evs []*event.Event +} + +func (s *sink) AddDatapoints(_ context.Context, points []*datapoint.Datapoint) error { + s.dps = append(s.dps, points...) + return nil +} +func (s *sink) AddEvents(_ context.Context, events []*event.Event) error { + s.evs = append(s.evs, events...)
+ return nil +} + +type errorsink struct { + dps []*datapoint.Datapoint + evs []*event.Event +} + +func (e *errorsink) AddDatapoints(_ context.Context, _ []*datapoint.Datapoint) error { + return errors.New("not sending datapoints") +} +func (e *errorsink) AddEvents(_ context.Context, _ []*event.Event) error { + return errors.New("not sending events") +} +func TestSignalFx_SignalFx(t *testing.T) { + type measurement struct { + name string + tags map[string]string + fields map[string]interface{} + time time.Time + tp telegraf.ValueType + } + type fields struct { + IncludedEvents []string + } + type want struct { + datapoints []*datapoint.Datapoint + events []*event.Event + } + tests := []struct { + name string + fields fields + measurements []*measurement + want want + }{ + { + name: "add datapoints of all types", + fields: fields{}, + measurements: []*measurement{ + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"myboolmeasurement": true}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"myboolmeasurement": false}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{ + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Counter, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + 
datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.myboolmeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewIntValue(int64(1)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.myboolmeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewIntValue(int64(0)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + events: []*event.Event{}, + }, + }, + { + name: "add events of all types", + fields: fields{ + IncludedEvents: []string{"event.mymeasurement"}, + }, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{ + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, 
time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + }, + }, + { + name: "exclude events by default", + fields: fields{}, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"value": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + { + name: "add datapoint with field named value", + fields: fields{}, + measurements: []*measurement{ + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"value": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{ + datapoint.New( + "datapoint", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + events: []*event.Event{}, + }, + }, + { + name: "add event", + fields: fields{ + IncludedEvents: []string{"event.mymeasurement"}, + }, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{ + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + }, + }, + { + name: "exclude events that are not explicitly included", + fields: fields{}, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"value": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + { + name: "malformed metadata event", + fields: fields{}, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1", "sf_metric": "objects.host-meta-data"}, + fields: map[string]interface{}{"value": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := outputs.Outputs["signalfx"]().(*SignalFx) + s.IncludedEventNames = tt.fields.IncludedEvents + s.SignalFxRealm = "test" + s.Log = testutil.Logger{} + + require.Nil(t, s.Connect()) + + s.client = &sink{ + dps: []*datapoint.Datapoint{}, + evs: []*event.Event{}, + } + + measurements := []telegraf.Metric{} + + for _, measurement := 
range tt.measurements { + m := metric.New( + measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, + ) + + measurements = append(measurements, m) + } + + s.Write(measurements) + require.Eventually(t, func() bool { return len(s.client.(*sink).dps) == len(tt.want.datapoints) }, 5*time.Second, 100*time.Millisecond) + require.Eventually(t, func() bool { return len(s.client.(*sink).evs) == len(tt.want.events) }, 5*time.Second, 100*time.Millisecond) + + if !reflect.DeepEqual(s.client.(*sink).dps, tt.want.datapoints) { + t.Errorf("Collected datapoints do not match desired. Collected: %v Desired: %v", s.client.(*sink).dps, tt.want.datapoints) + } + if !reflect.DeepEqual(s.client.(*sink).evs, tt.want.events) { + t.Errorf("Collected events do not match desired. Collected: %v Desired: %v", s.client.(*sink).evs, tt.want.events) + } + }) + } +} + +func TestSignalFx_Errors(t *testing.T) { + type measurement struct { + name string + tags map[string]string + fields map[string]interface{} + time time.Time + tp telegraf.ValueType + } + type fields struct { + IncludedEvents []string + } + type want struct { + datapoints []*datapoint.Datapoint + events []*event.Event + } + tests := []struct { + name string + fields fields + measurements []*measurement + want want + }{ + { + name: "add datapoints of all types", + fields: fields{}, + measurements: []*measurement{ + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + { + name: "add events of all types", + fields: fields{ + IncludedEvents: []string{"event.mymeasurement"}, + }, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "event", + tags: 
map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := outputs.Outputs["signalfx"]().(*SignalFx) + // constrain the buffer to cover code that emits when batch size is met + s.IncludedEventNames = tt.fields.IncludedEvents + s.SignalFxRealm = "test" + s.Log = testutil.Logger{} + + require.Nil(t, s.Connect()) + + s.client = &errorsink{ + dps: []*datapoint.Datapoint{}, + evs: []*event.Event{}, + } + + for _, measurement := range tt.measurements { + m := metric.New( + measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, + ) + + s.Write([]telegraf.Metric{m}) + } + for !(len(s.client.(*errorsink).dps) == len(tt.want.datapoints) && len(s.client.(*errorsink).evs) == len(tt.want.events)) { + time.Sleep(1 * time.Second) + } + if !reflect.DeepEqual(s.client.(*errorsink).dps, tt.want.datapoints) { + t.Errorf("Collected datapoints do not match desired. Collected: %v Desired: %v", s.client.(*errorsink).dps, tt.want.datapoints) + } + if !reflect.DeepEqual(s.client.(*errorsink).evs, tt.want.events) { + t.Errorf("Collected events do not match desired. 
Collected: %v Desired: %v", s.client.(*errorsink).evs, tt.want.events)
+			}
+		})
+	}
+}
+
+// this is really just for complete code coverage
+func TestSignalFx_Description(t *testing.T) {
+	tests := []struct {
+		name string
+		want string
+	}{
+		{
+			name: "verify description is correct",
+			want: "Send metrics and events to SignalFx",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := &SignalFx{}
+			if got := s.Description(); got != tt.want {
+				t.Errorf("SignalFx.Description() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// this is also just for complete code coverage
+func TestSignalFx_SampleConfig(t *testing.T) {
+	tests := []struct {
+		name string
+		want string
+	}{
+		{
+			name: "verify sample config is returned",
+			want: sampleConfig,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := &SignalFx{}
+			if got := s.SampleConfig(); got != tt.want {
+				t.Errorf("SignalFx.SampleConfig() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestGetMetricName(t *testing.T) {
+	type args struct {
+		metric string
+		field  string
+		dims   map[string]string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantsfx bool
+	}{
+		{
+			name: "fields that equal value should not be appended to the metric name",
+			args: args{
+				metric: "datapoint",
+				field:  "value",
+				dims: map[string]string{
+					"testDimKey": "testDimVal",
+				},
+			},
+			want: "datapoint",
+		},
+		{
+			name: "fields other than 'value' without the sf_metric dim should return measurement.fieldname as the metric name",
+			args: args{
+				metric: "datapoint",
+				field:  "test",
+				dims: map[string]string{
+					"testDimKey": "testDimVal",
+				},
+			},
+			want: "datapoint.test",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := getMetricName(tt.args.metric, tt.args.field)
+			if got != tt.want {
+				t.Errorf("getMetricName() got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go
index dae7edc0e806f..2546faa6779d7 100644
--- a/plugins/outputs/socket_writer/socket_writer.go
+++ b/plugins/outputs/socket_writer/socket_writer.go
@@ -6,8 +6,10 @@ import (
 	"log"
 	"net"
 	"strings"
+	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/internal"
 	tlsint "github.com/influxdata/telegraf/plugins/common/tls"
 	"github.com/influxdata/telegraf/plugins/outputs"
@@ -17,7 +19,7 @@ import (
 type SocketWriter struct {
 	ContentEncoding string `toml:"content_encoding"`
 	Address         string
-	KeepAlivePeriod *internal.Duration
+	KeepAlivePeriod *config.Duration
 	tlsint.ClientConfig
 
 	serializers.Serializer
@@ -117,13 +119,13 @@ func (sw *SocketWriter) setKeepAlive(c net.Conn) error {
 	if !ok {
 		return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(sw.Address, "://", 2)[0])
 	}
-	if sw.KeepAlivePeriod.Duration == 0 {
+	if *sw.KeepAlivePeriod == 0 {
 		return tcpc.SetKeepAlive(false)
 	}
 	if err := tcpc.SetKeepAlive(true); err != nil {
 		return err
 	}
-	return tcpc.SetKeepAlivePeriod(sw.KeepAlivePeriod.Duration)
+	return tcpc.SetKeepAlivePeriod(time.Duration(*sw.KeepAlivePeriod))
 }
 
 // Write writes the given metrics to the destination.
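The socket_writer hunk above (and the matching syslog change later in this diff) migrates `KeepAlivePeriod` from `internal.Duration` to `config.Duration`. A minimal sketch of the resulting pattern, assuming a hypothetical `writer` struct; the nil guard stands in for the check the plugin performs before calling `setKeepAlive`:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/influxdata/telegraf/config"
)

// writer is a hypothetical stand-in for a plugin struct such as SocketWriter.
type writer struct {
	KeepAlivePeriod *config.Duration
}

func (w *writer) setKeepAlive(c net.Conn) error {
	tcpc, ok := c.(*net.TCPConn)
	if !ok || w.KeepAlivePeriod == nil {
		return nil // not a TCP socket, or keep-alive left unconfigured
	}
	if *w.KeepAlivePeriod == 0 {
		return tcpc.SetKeepAlive(false)
	}
	if err := tcpc.SetKeepAlive(true); err != nil {
		return err
	}
	// config.Duration is a defined type over time.Duration, so a plain
	// conversion replaces the old .Duration field access.
	return tcpc.SetKeepAlivePeriod(time.Duration(*w.KeepAlivePeriod))
}

func main() {
	period := config.Duration(30 * time.Second)
	w := &writer{KeepAlivePeriod: &period}
	fmt.Println(time.Duration(*w.KeepAlivePeriod)) // 30s
}
```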
diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 14b25e6c570ff..0decb644cccab 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,10 +2,10 @@ package socket_writer import ( "bufio" - "io/ioutil" "net" "os" "path/filepath" + "runtime" "sync" "testing" @@ -28,7 +28,7 @@ func TestSocketWriter_tcp(t *testing.T) { lconn, err := listener.Accept() require.NoError(t, err) - testSocketWriter_stream(t, sw, lconn) + testSocketWriterStream(t, sw, lconn) } func TestSocketWriter_udp(t *testing.T) { @@ -41,11 +41,11 @@ func TestSocketWriter_udp(t *testing.T) { err = sw.Connect() require.NoError(t, err) - testSocketWriter_packet(t, sw, listener) + testSocketWriterPacket(t, sw, listener) } func TestSocketWriter_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") @@ -62,11 +62,15 @@ func TestSocketWriter_unix(t *testing.T) { lconn, err := listener.Accept() require.NoError(t, err) - testSocketWriter_stream(t, sw, lconn) + testSocketWriterStream(t, sw, lconn) } func TestSocketWriter_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") @@ -80,10 +84,10 @@ func TestSocketWriter_unixgram(t *testing.T) { err = sw.Connect() require.NoError(t, err) - testSocketWriter_packet(t, sw, listener) + testSocketWriterPacket(t, sw, listener) } -func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) { +func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) @@ -105,7 +109,7 @@ func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) { assert.Equal(t, string(mbs2out), mstr2in) } -func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketConn) { +func testSocketWriterPacket(t *testing.T, sw *SocketWriter, lconn net.PacketConn) { metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) @@ -207,5 +211,5 @@ func TestSocketWriter_udp_gzip(t *testing.T) { err = sw.Connect() require.NoError(t, err) - testSocketWriter_packet(t, sw, listener) + testSocketWriterPacket(t, sw, listener) } diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md new file mode 100644 index 0000000000000..77b89762a7a87 --- /dev/null +++ b/plugins/outputs/sql/README.md @@ -0,0 +1,151 @@ +# SQL Output Plugin + +The SQL output plugin saves Telegraf metric data to an SQL database. + +The plugin uses a simple, hard-coded database schema. There is a table +for each metric type and the table name is the metric name. There is a +column per field and a column per tag. There is an optional column for +the metric timestamp. + +A row is written for every input metric. This means multiple metrics +are never merged into a single row, even if they have the same metric +name, tags, and timestamp. 
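+
+For example (illustrative), a metric named `weather` with a tag
+`location` and a field `temperature` is stored in a table named
+`weather` with the columns `timestamp`, `location`, and `temperature`,
+and every such metric adds one row to that table.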
+
+The plugin uses Go's generic "database/sql" interface and third-party
+drivers. See the driver-specific section below for a list of supported
+drivers and details. Additional drivers may be added in future Telegraf
+releases.
+
+## Getting started
+
+To use the plugin, set the driver setting to the driver name
+appropriate for your database. Then set the data source name (DSN). The
+format of the DSN varies by driver but often includes a username, a
+password, the database instance to use, and the hostname of the
+database server. The user account must have privileges to insert rows
+and create tables.
+
+## Generated SQL
+
+The plugin generates simple ANSI/ISO SQL that is likely to work on any
+DBMS. It avoids language features that are specific to a particular
+DBMS. If you need such a feature, you may be able to set it up manually
+outside of this plugin or through the `init_sql` setting.
+
+The insert statements generated by the plugin use placeholder
+parameters. Most database drivers use question marks as placeholders,
+but PostgreSQL uses numbered dollar-sign placeholders ($1, $2, ...).
+The plugin chooses the placeholder style to match the selected driver;
+a short sketch of both styles follows at the end of this README.
+
+## Advanced options
+
+When the plugin first connects, it runs the SQL from the `init_sql`
+setting, allowing you to perform custom initialization for the
+connection.
+
+Before inserting a row, the plugin checks whether the table exists. If
+it doesn't exist, the plugin creates the table. The existence check and
+the table creation statements can be changed through template settings.
+The template settings allow you to have the plugin create customized
+tables, or to skip table creation entirely by setting the check
+template to any query that executes without error, such as "select 1".
+
+The name of the timestamp column is "timestamp" but it can be changed
+with the `timestamp_column` setting. The timestamp column can be
+completely disabled by setting it to "".
+
+By changing the table creation template, it's possible with some
+databases to save a row insertion timestamp. You can add an additional
+column with a default value to the template, like `CREATE TABLE
+{TABLE}(insertion_timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+{COLUMNS})`.
+
+The mapping of metric types to SQL column types can be customized
+through the convert settings.
+
+## Configuration
+
+```toml
+# Save metrics to an SQL Database
+[[outputs.sql]]
+  ## Database driver
+  ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres),
+  ## sqlite (SQLite3), snowflake (snowflake.com)
+  # driver = ""
+
+  ## Data source name
+  ## The format of the data source name is different for each database driver.
+  ## See the plugin readme for details.
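+  ## For example (illustrative values only), a DSN for the pgx driver
+  ## might look like:
+  ##   data_source_name = "postgres://telegraf:mypassword@localhost:5432/telegraf"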
+  # data_source_name = ""
+
+  ## Timestamp column name
+  # timestamp_column = "timestamp"
+
+  ## Table creation template
+  ## Available template variables:
+  ##  {TABLE} - table name as a quoted identifier
+  ##  {TABLELITERAL} - table name as a quoted string literal
+  ##  {COLUMNS} - column definitions (list of quoted identifiers and types)
+  # table_template = "CREATE TABLE {TABLE}({COLUMNS})"
+
+  ## Table existence check template
+  ## Available template variables:
+  ##  {TABLE} - tablename as a quoted identifier
+  # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1"
+
+  ## Initialization SQL
+  # init_sql = ""
+
+  ## Metric type to SQL type conversion
+  #[outputs.sql.convert]
+  #  integer = "INT"
+  #  real = "DOUBLE"
+  #  text = "TEXT"
+  #  timestamp = "TIMESTAMP"
+  #  defaultvalue = "TEXT"
+  #  unsigned = "UNSIGNED"
+  #  bool = "BOOL"
+```
+
+## Driver-specific information
+
+### go-sql-driver/mysql
+
+MySQL default quoting differs from standard ANSI/ISO SQL quoting. You
+must use MySQL's ANSI_QUOTES mode with this plugin. You can enable this
+mode with the setting `init_sql = "SET sql_mode='ANSI_QUOTES';"` or
+through a command-line option when running MySQL. See MySQL's docs for
+[details on ANSI_QUOTES](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_ansi_quotes)
+and [how to set the SQL mode](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sql-mode-setting).
+
+You can use a DSN of the format
+"username:password@tcp(host:port)/dbname". See the [driver
+docs](https://github.com/go-sql-driver/mysql) for details.
+
+### jackc/pgx
+
+You can use a DSN of the format
+"postgres://username:password@host:port/dbname". See the [driver
+docs](https://github.com/jackc/pgx) for more details.
+
+### modernc.org/sqlite
+
+This driver is not available on all operating systems and
+architectures. It is only included in the Linux builds for amd64, 386,
+arm64, and arm, and in the Darwin build for amd64. It is not available
+on Windows, on FreeBSD, or on other Linux and Darwin platforms.
+
+The DSN is a filename or a URL with the "file:" scheme. See the
+[driver docs](https://modernc.org/sqlite) for details.
+
+### denisenkom/go-mssqldb
+
+Telegraf doesn't have unit tests for go-mssqldb, so it should be
+treated as experimental.
+
+### snowflakedb/gosnowflake
+
+Telegraf doesn't have unit tests for gosnowflake, so it should be
+treated as experimental.
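As a sketch of the two placeholder styles the README describes (illustrative only; the plugin's real logic is `generateInsert` in sql.go below):

```go
package main

import (
	"fmt"
	"strings"
)

// placeholders builds the VALUES(...) placeholder list for n columns.
func placeholders(driver string, n int) string {
	ph := make([]string, n)
	for i := range ph {
		if driver == "pgx" {
			ph[i] = fmt.Sprintf("$%d", i+1) // Postgres: $1,$2,$3,...
		} else {
			ph[i] = "?" // mysql, sqlite, mssql, snowflake: ?,?,?
		}
	}
	return strings.Join(ph, ",")
}

func main() {
	fmt.Println(placeholders("pgx", 3))   // $1,$2,$3
	fmt.Println(placeholders("mysql", 3)) // ?,?,?
}
```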
diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go new file mode 100644 index 0000000000000..fecaf2f6e7661 --- /dev/null +++ b/plugins/outputs/sql/sql.go @@ -0,0 +1,281 @@ +package sql + +import ( + gosql "database/sql" + "fmt" + "strings" + + //Register sql drivers + _ "github.com/denisenkom/go-mssqldb" // mssql (sql server) + _ "github.com/go-sql-driver/mysql" // mysql + _ "github.com/jackc/pgx/v4/stdlib" // pgx (postgres) + _ "github.com/snowflakedb/gosnowflake" // snowflake + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type ConvertStruct struct { + Integer string + Real string + Text string + Timestamp string + Defaultvalue string + Unsigned string + Bool string +} + +type SQL struct { + Driver string + DataSourceName string + TimestampColumn string + TableTemplate string + TableExistsTemplate string + InitSQL string `toml:"init_sql"` + Convert ConvertStruct + + db *gosql.DB + Log telegraf.Logger `toml:"-"` + tables map[string]bool +} + +func (p *SQL) Connect() error { + db, err := gosql.Open(p.Driver, p.DataSourceName) + if err != nil { + return err + } + + err = db.Ping() + if err != nil { + return err + } + + if p.InitSQL != "" { + _, err = db.Exec(p.InitSQL) + if err != nil { + return err + } + } + + p.db = db + p.tables = make(map[string]bool) + + return nil +} + +func (p *SQL) Close() error { + return p.db.Close() +} + +// Quote an identifier (table or column name) +func quoteIdent(name string) string { + return `"` + strings.Replace(sanitizeQuoted(name), `"`, `""`, -1) + `"` +} + +// Quote a string literal +func quoteStr(name string) string { + return "'" + strings.Replace(name, "'", "''", -1) + "'" +} + +func sanitizeQuoted(in string) string { + // https://dev.mysql.com/doc/refman/8.0/en/identifiers.html + // https://www.postgresql.org/docs/13/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + + // Whitelist allowed characters + return strings.Map(func(r rune) rune { + switch { + case r >= '\u0001' && r <= '\uFFFF': + return r + default: + return '_' + } + }, in) +} + +func (p *SQL) deriveDatatype(value interface{}) string { + var datatype string + + switch value.(type) { + case int64: + datatype = p.Convert.Integer + case uint64: + datatype = fmt.Sprintf("%s %s", p.Convert.Integer, p.Convert.Unsigned) + case float64: + datatype = p.Convert.Real + case string: + datatype = p.Convert.Text + case bool: + datatype = p.Convert.Bool + default: + datatype = p.Convert.Defaultvalue + p.Log.Errorf("Unknown datatype: '%T' %v", value, value) + } + return datatype +} + +var sampleConfig = ` + ## Database driver + ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), + ## sqlite (SQLite3), snowflake (snowflake.com) + # driver = "" + + ## Data source name + ## The format of the data source name is different for each database driver. + ## See the plugin readme for details. 
+ # data_source_name = "" + + ## Timestamp column name + # timestamp_column = "timestamp" + + ## Table creation template + ## Available template variables: + ## {TABLE} - table name as a quoted identifier + ## {TABLELITERAL} - table name as a quoted string literal + ## {COLUMNS} - column definitions (list of quoted identifiers and types) + # table_template = "CREATE TABLE {TABLE}({COLUMNS})" + + ## Table existence check template + ## Available template variables: + ## {TABLE} - tablename as a quoted identifier + # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" + + ## Initialization SQL + # init_sql = "" + + ## Metric type to SQL type conversion + #[outputs.sql.convert] + # integer = "INT" + # real = "DOUBLE" + # text = "TEXT" + # timestamp = "TIMESTAMP" + # defaultvalue = "TEXT" + # unsigned = "UNSIGNED" +` + +func (p *SQL) SampleConfig() string { return sampleConfig } +func (p *SQL) Description() string { return "Send metrics to SQL Database" } + +func (p *SQL) generateCreateTable(metric telegraf.Metric) string { + var columns []string + // ## {KEY_COLUMNS} is a comma-separated list of key columns (timestamp and tags) + //var pk []string + + if p.TimestampColumn != "" { + //pk = append(pk, quoteIdent(p.TimestampColumn)) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(p.TimestampColumn), p.Convert.Timestamp)) + } + + for _, tag := range metric.TagList() { + //pk = append(pk, quoteIdent(tag.Key)) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(tag.Key), p.Convert.Text)) + } + + var datatype string + for _, field := range metric.FieldList() { + datatype = p.deriveDatatype(field.Value) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(field.Key), datatype)) + } + + query := p.TableTemplate + query = strings.Replace(query, "{TABLE}", quoteIdent(metric.Name()), -1) + query = strings.Replace(query, "{TABLELITERAL}", quoteStr(metric.Name()), -1) + query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) + //query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) + + return query +} + +func (p *SQL) generateInsert(tablename string, columns []string) string { + var placeholders, quotedColumns []string + for _, column := range columns { + quotedColumns = append(quotedColumns, quoteIdent(column)) + } + if p.Driver == "pgx" { + // Postgres uses $1 $2 $3 as placeholders + for i := 0; i < len(columns); i++ { + placeholders = append(placeholders, fmt.Sprintf("$%d", i+1)) + } + } else { + // Everything else uses ? ? ? 
as placeholders + for i := 0; i < len(columns); i++ { + placeholders = append(placeholders, "?") + } + } + + return fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", + quoteIdent(tablename), + strings.Join(quotedColumns, ","), + strings.Join(placeholders, ",")) +} + +func (p *SQL) tableExists(tableName string) bool { + stmt := strings.Replace(p.TableExistsTemplate, "{TABLE}", quoteIdent(tableName), -1) + + _, err := p.db.Exec(stmt) + return err == nil +} + +func (p *SQL) Write(metrics []telegraf.Metric) error { + for _, metric := range metrics { + tablename := metric.Name() + + // create table if needed + if !p.tables[tablename] && !p.tableExists(tablename) { + createStmt := p.generateCreateTable(metric) + _, err := p.db.Exec(createStmt) + if err != nil { + return err + } + p.tables[tablename] = true + } + + var columns []string + var values []interface{} + + if p.TimestampColumn != "" { + columns = append(columns, p.TimestampColumn) + values = append(values, metric.Time()) + } + + for column, value := range metric.Tags() { + columns = append(columns, column) + values = append(values, value) + } + + for column, value := range metric.Fields() { + columns = append(columns, column) + values = append(values, value) + } + + sql := p.generateInsert(tablename, columns) + _, err := p.db.Exec(sql, values...) + + if err != nil { + // check if insert error was caused by column mismatch + p.Log.Errorf("Error during insert: %v, %v", err, sql) + return err + } + } + return nil +} + +func init() { + outputs.Add("sql", func() telegraf.Output { return newSQL() }) +} + +func newSQL() *SQL { + return &SQL{ + TableTemplate: "CREATE TABLE {TABLE}({COLUMNS})", + TableExistsTemplate: "SELECT 1 FROM {TABLE} LIMIT 1", + TimestampColumn: "timestamp", + Convert: ConvertStruct{ + Integer: "INT", + Real: "DOUBLE", + Text: "TEXT", + Timestamp: "TIMESTAMP", + Defaultvalue: "TEXT", + Unsigned: "UNSIGNED", + Bool: "BOOL", + }, + } +} diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go new file mode 100644 index 0000000000000..ef02c89b11fad --- /dev/null +++ b/plugins/outputs/sql/sql_test.go @@ -0,0 +1,336 @@ +package sql + +import ( + "context" + "fmt" + "math/rand" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" +) + +func TestSqlQuote(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } +} + +func TestSqlCreateStatement(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } +} + +func TestSqlInsertStatement(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } +} + +func pwgen(n int) string { + charset := []byte("abcdedfghijklmnopqrstABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + nchars := len(charset) + buffer := make([]byte, n) + + for i := range buffer { + buffer[i] = charset[rand.Intn(nchars)] + } + + return string(buffer) +} + +func stableMetric( + name string, + tags []telegraf.Tag, + fields []telegraf.Field, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + // We want to compare the output of this plugin with expected + // output. Maps don't preserve order so comparison fails. There's + // no metric constructor that takes a slice of tag and slice of + // field, just the one that takes maps. 
+ // + // To preserve order, construct the metric without tags and fields + // and then add them using AddTag and AddField. Those are stable. + m := metric.New(name, map[string]string{}, map[string]interface{}{}, tm, tp...) + for _, tag := range tags { + m.AddTag(tag.Key, tag.Value) + } + for _, field := range fields { + m.AddField(field.Key, field.Value) + } + return m +} + +var ( + // 2021-05-17T22:04:45+00:00 + // or 2021-05-17T16:04:45-06:00 + ts = time.Unix(1621289085, 0).UTC() + + testMetrics = []telegraf.Metric{ + stableMetric( + "metric_one", + []telegraf.Tag{ + { + Key: "tag_one", + Value: "tag1", + }, + { + Key: "tag_two", + Value: "tag2", + }, + }, + []telegraf.Field{ + { + Key: "int64_one", + Value: int64(1234), + }, + { + Key: "int64_two", + Value: int64(2345), + }, + { + Key: "bool_one", + Value: true, + }, + { + Key: "bool_two", + Value: false, + }, + }, + ts, + ), + stableMetric( + "metric_two", + []telegraf.Tag{ + { + Key: "tag_three", + Value: "tag3", + }, + }, + []telegraf.Field{ + { + Key: "string_one", + Value: "string1", + }, + }, + ts, + ), + stableMetric( //test spaces in metric, tag, and field names + "metric three", + []telegraf.Tag{ + { + Key: "tag four", + Value: "tag4", + }, + }, + []telegraf.Field{ + { + Key: "string two", + Value: "string2", + }, + }, + ts, + ), + } +) + +func TestMysqlIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + initdb, err := filepath.Abs("testdata/mariadb/initdb") + require.NoError(t, err) + + // initdb/script.sql creates this database + const dbname = "foo" + + // The mariadb image lets you set the root password through an env + // var. We'll use root to insert and query test data. + const username = "root" + + password := pwgen(32) + outDir, err := os.MkdirTemp("", "tg-mysql-*") + require.NoError(t, err) + defer os.RemoveAll(outDir) + + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "mariadb", + Env: map[string]string{ + "MARIADB_ROOT_PASSWORD": password, + }, + BindMounts: map[string]string{ + initdb: "/docker-entrypoint-initdb.d", + outDir: "/out", + }, + ExposedPorts: []string{"3306/tcp"}, + WaitingFor: wait.ForListeningPort("3306/tcp"), + }, + Started: true, + } + mariadbContainer, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, mariadbContainer.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + host, err := mariadbContainer.Host(ctx) + require.NoError(t, err, "getting container host address failed") + require.NotEmpty(t, host) + natPort, err := mariadbContainer.MappedPort(ctx, "3306/tcp") + require.NoError(t, err, "getting container host port failed") + port := natPort.Port() + require.NotEmpty(t, port) + + //use the plugin to write to the database + address := fmt.Sprintf("%v:%v@tcp(%v:%v)/%v", + username, password, host, port, dbname, + ) + p := newSQL() + p.Log = testutil.Logger{} + p.Driver = "mysql" + p.DataSourceName = address + //p.Convert.Timestamp = "TEXT" //disable mysql default current_timestamp() + p.InitSQL = "SET sql_mode='ANSI_QUOTES';" + + require.NoError(t, p.Connect()) + require.NoError(t, p.Write( + testMetrics, + )) + + //dump the database + var rc int + rc, err = mariadbContainer.Exec(ctx, []string{ + "bash", + "-c", + "mariadb-dump --user=" + username + + " --password=" + password + + " --compact --skip-opt 
" + + dbname + + " > /out/dump", + }) + require.NoError(t, err) + require.Equal(t, 0, rc) + dumpfile := filepath.Join(outDir, "dump") + require.FileExists(t, dumpfile) + + //compare the dump to what we expected + expected, err := os.ReadFile("testdata/mariadb/expected.sql") + require.NoError(t, err) + actual, err := os.ReadFile(dumpfile) + require.NoError(t, err) + require.Equal(t, string(expected), string(actual)) +} + +func TestPostgresIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + initdb, err := filepath.Abs("testdata/postgres/initdb") + require.NoError(t, err) + + // initdb/init.sql creates this database + const dbname = "foo" + + // default username for postgres is postgres + const username = "postgres" + + password := pwgen(32) + outDir, err := os.MkdirTemp("", "tg-postgres-*") + require.NoError(t, err) + defer os.RemoveAll(outDir) + + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "postgres", + Env: map[string]string{ + "POSTGRES_PASSWORD": password, + }, + BindMounts: map[string]string{ + initdb: "/docker-entrypoint-initdb.d", + outDir: "/out", + }, + ExposedPorts: []string{"5432/tcp"}, + WaitingFor: wait.ForListeningPort("5432/tcp"), + }, + Started: true, + } + cont, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, cont.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + host, err := cont.Host(ctx) + require.NoError(t, err, "getting container host address failed") + require.NotEmpty(t, host) + natPort, err := cont.MappedPort(ctx, "5432/tcp") + require.NoError(t, err, "getting container host port failed") + port := natPort.Port() + require.NotEmpty(t, port) + + //use the plugin to write to the database + // host, port, username, password, dbname + address := fmt.Sprintf("postgres://%v:%v@%v:%v/%v", + username, password, host, port, dbname, + ) + p := newSQL() + p.Log = testutil.Logger{} + p.Driver = "pgx" + p.DataSourceName = address + + require.NoError(t, p.Connect()) + require.NoError(t, p.Write( + testMetrics, + )) + + //dump the database + //psql -u postgres + var rc int + rc, err = cont.Exec(ctx, []string{ + "bash", + "-c", + "pg_dump" + + " --username=" + username + + //" --password=" + password + + // " --compact --skip-opt " + + " --no-comments" + + //" --data-only" + + " " + dbname + + // pg_dump's output has comments that include build info + // of postgres and pg_dump. The build info changes with + // each release. To prevent these changes from causing the + // test to fail, we strip out comments. Also strip out + // blank lines. 
+			"|grep -E -v '(^--|^$)'" +
+			" > /out/dump 2>&1",
+	})
+	require.NoError(t, err)
+	require.Equal(t, 0, rc)
+	dumpfile := filepath.Join(outDir, "dump")
+	require.FileExists(t, dumpfile)
+
+	//compare the dump to what we expected
+	expected, err := os.ReadFile("testdata/postgres/expected.sql")
+	require.NoError(t, err)
+	actual, err := os.ReadFile(dumpfile)
+	require.NoError(t, err)
+	require.Equal(t, string(expected), string(actual))
+}
diff --git a/plugins/outputs/sql/sqlite.go b/plugins/outputs/sql/sqlite.go
new file mode 100644
index 0000000000000..15666101a957d
--- /dev/null
+++ b/plugins/outputs/sql/sqlite.go
@@ -0,0 +1,12 @@
+//go:build (linux || freebsd || darwin) && !mips && !mips64
+// +build linux freebsd darwin
+// +build !mips,!mips64
+
+package sql
+
+// The modernc.org sqlite driver isn't supported on all
+// platforms. Register it with build constraints to prevent build
+// failures on unsupported platforms.
+import (
+	_ "modernc.org/sqlite" // Register sqlite sql driver
+)
diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go
new file mode 100644
index 0000000000000..7707f9d085e7e
--- /dev/null
+++ b/plugins/outputs/sql/sqlite_test.go
@@ -0,0 +1,135 @@
+//go:build (linux || freebsd) && !mips && !mips64
+// +build linux freebsd
+// +build !mips,!mips64
+
+package sql
+
+import (
+	gosql "database/sql"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSqlite(t *testing.T) {
+	outDir, err := os.MkdirTemp("", "tg-sqlite-*")
+	require.NoError(t, err)
+	defer os.RemoveAll(outDir)
+
+	dbfile := filepath.Join(outDir, "db")
+
+	// Use the plugin to write to the database. The driver accepts a
+	// plain path or a "file:" URI, e.g.
+	// address := fmt.Sprintf("file:%v", dbfile)
+	address := dbfile
+	p := newSQL()
+	p.Log = testutil.Logger{}
+	p.Driver = "sqlite"
+	p.DataSourceName = address
+
+	require.NoError(t, p.Connect())
+	require.NoError(t, p.Write(
+		testMetrics,
+	))
+
+	//read directly from the database
+	db, err := gosql.Open("sqlite", address)
+	require.NoError(t, err)
+	defer db.Close()
+
+	var countMetricOne int
+	require.NoError(t, db.QueryRow("select count(*) from metric_one").Scan(&countMetricOne))
+	require.Equal(t, 1, countMetricOne)
+
+	var countMetricTwo int
+	require.NoError(t, db.QueryRow("select count(*) from metric_two").Scan(&countMetricTwo))
+	require.Equal(t, 1, countMetricTwo)
+
+	var rows *gosql.Rows
+
+	// Check that tables were created as expected
+	rows, err = db.Query("select sql from sqlite_master")
+	require.NoError(t, err)
+	var sql string
+	require.True(t, rows.Next())
+	require.NoError(t, rows.Scan(&sql))
+	require.Equal(t,
+		`CREATE TABLE "metric_one"("timestamp" TIMESTAMP,"tag_one" TEXT,"tag_two" TEXT,"int64_one" INT,"int64_two" INT,"bool_one" BOOL,"bool_two" BOOL)`,
+		sql,
+	)
+	require.True(t, rows.Next())
+	require.NoError(t, rows.Scan(&sql))
+	require.Equal(t,
+		`CREATE TABLE "metric_two"("timestamp" TIMESTAMP,"tag_three" TEXT,"string_one" TEXT)`,
+		sql,
+	)
+	require.True(t, rows.Next())
+	require.NoError(t, rows.Scan(&sql))
+	require.Equal(t,
+		`CREATE TABLE "metric three"("timestamp" TIMESTAMP,"tag four" TEXT,"string two" TEXT)`,
+		sql,
+	)
+	require.False(t, rows.Next())
+	require.NoError(t, rows.Close()) //nolint:sqlclosecheck
+
+	// sqlite stores dates as strings. They may be in the local
+	// timezone. The test needs to parse them back into a time.Time to
+	// check them.
+ //timeLayout := "2006-01-02 15:04:05 -0700 MST" + timeLayout := "2006-01-02T15:04:05Z" + var actualTime time.Time + + // Check contents of tables + rows, err = db.Query("select timestamp, tag_one, tag_two, int64_one, int64_two from metric_one") + require.NoError(t, err) + require.True(t, rows.Next()) + var ( + a string + b, c string + d, e int64 + ) + require.NoError(t, rows.Scan(&a, &b, &c, &d, &e)) + actualTime, err = time.Parse(timeLayout, a) + require.NoError(t, err) + require.Equal(t, ts, actualTime.UTC()) + require.Equal(t, "tag1", b) + require.Equal(t, "tag2", c) + require.Equal(t, int64(1234), d) + require.Equal(t, int64(2345), e) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck + + rows, err = db.Query("select timestamp, tag_three, string_one from metric_two") + require.NoError(t, err) + require.True(t, rows.Next()) + var ( + f, g, h string + ) + require.NoError(t, rows.Scan(&f, &g, &h)) + actualTime, err = time.Parse(timeLayout, f) + require.NoError(t, err) + require.Equal(t, ts, actualTime.UTC()) + require.Equal(t, "tag3", g) + require.Equal(t, "string1", h) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck + + rows, err = db.Query(`select timestamp, "tag four", "string two" from "metric three"`) + require.NoError(t, err) + require.True(t, rows.Next()) + var ( + i, j, k string + ) + require.NoError(t, rows.Scan(&i, &j, &k)) + actualTime, err = time.Parse(timeLayout, i) + require.NoError(t, err) + require.Equal(t, ts, actualTime.UTC()) + require.Equal(t, "tag4", j) + require.Equal(t, "string2", k) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck +} diff --git a/plugins/outputs/sql/testdata/mariadb/expected.sql b/plugins/outputs/sql/testdata/mariadb/expected.sql new file mode 100644 index 0000000000000..43e0fa5e545b0 --- /dev/null +++ b/plugins/outputs/sql/testdata/mariadb/expected.sql @@ -0,0 +1,38 @@ +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `bar` ( + `baz` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `bar` VALUES (1); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric three` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag four` text DEFAULT NULL, + `string two` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric three` VALUES ('2021-05-17 22:04:45','tag4','string2'); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_one` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_one` text DEFAULT NULL, + `tag_two` text DEFAULT NULL, + `int64_one` int(11) DEFAULT NULL, + `int64_two` int(11) DEFAULT NULL, + `bool_one` tinyint(1) DEFAULT NULL, + `bool_two` tinyint(1) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345,1,0); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_two` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_three` text DEFAULT NULL, + `string_one` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_two` VALUES ('2021-05-17 
22:04:45','tag3','string1'); diff --git a/plugins/outputs/sql/testdata/mariadb/initdb/script.sql b/plugins/outputs/sql/testdata/mariadb/initdb/script.sql new file mode 100644 index 0000000000000..7e155e105f15a --- /dev/null +++ b/plugins/outputs/sql/testdata/mariadb/initdb/script.sql @@ -0,0 +1,4 @@ +create database foo; +use foo; +create table bar (baz int); +insert into bar (baz) values (1); diff --git a/plugins/outputs/sql/testdata/postgres/expected.sql b/plugins/outputs/sql/testdata/postgres/expected.sql new file mode 100644 index 0000000000000..c1ee733ac12d4 --- /dev/null +++ b/plugins/outputs/sql/testdata/postgres/expected.sql @@ -0,0 +1,43 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; +SET default_tablespace = ''; +SET default_table_access_method = heap; +CREATE TABLE public."metric three" ( + "timestamp" timestamp without time zone, + "tag four" text, + "string two" text +); +ALTER TABLE public."metric three" OWNER TO postgres; +CREATE TABLE public.metric_one ( + "timestamp" timestamp without time zone, + tag_one text, + tag_two text, + int64_one integer, + int64_two integer, + bool_one boolean, + bool_two boolean +); +ALTER TABLE public.metric_one OWNER TO postgres; +CREATE TABLE public.metric_two ( + "timestamp" timestamp without time zone, + tag_three text, + string_one text +); +ALTER TABLE public.metric_two OWNER TO postgres; +COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; +2021-05-17 22:04:45 tag4 string2 +\. +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two, bool_one, bool_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 t f +\. +COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; +2021-05-17 22:04:45 tag3 string1 +\. diff --git a/plugins/outputs/sql/testdata/postgres/initdb/init.sql b/plugins/outputs/sql/testdata/postgres/initdb/init.sql new file mode 100644 index 0000000000000..0694ada11fbbe --- /dev/null +++ b/plugins/outputs/sql/testdata/postgres/initdb/init.sql @@ -0,0 +1,2 @@ +create database foo; + diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 3bd38614b985e..d6b24ff78839b 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -9,8 +9,7 @@ import ( "sort" "strings" - monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. - googlepb "github.com/golang/protobuf/ptypes/timestamp" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" @@ -18,6 +17,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) // Stackdriver is the Google Stackdriver config info. @@ -71,11 +71,11 @@ var sampleConfig = ` // Connect initiates the primary connection to the GCP project. 
func (s *Stackdriver) Connect() error { if s.Project == "" { - return fmt.Errorf("Project is a required field for stackdriver output") + return fmt.Errorf("project is a required field for stackdriver output") } if s.Namespace == "" { - return fmt.Errorf("Namespace is a required field for stackdriver output") + return fmt.Errorf("namespace is a required field for stackdriver output") } if s.ResourceType == "" { @@ -218,7 +218,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { // Prepare time series request. timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), TimeSeries: timeSeries, } @@ -247,16 +247,16 @@ func getStackdriverTimeInterval( switch m { case metricpb.MetricDescriptor_GAUGE: return &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: end, }, }, nil case metricpb.MetricDescriptor_CUMULATIVE: return &monitoringpb.TimeInterval{ - StartTime: &googlepb.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: start, }, - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: end, }, }, nil @@ -300,7 +300,7 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro case int64: return &monitoringpb.TypedValue{ Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v), + Int64Value: v, }, }, nil case float64: @@ -312,7 +312,7 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro case bool: return &monitoringpb.TypedValue{ Value: &monitoringpb.TypedValue_BoolValue{ - BoolValue: bool(v), + BoolValue: v, }, }, nil case string: diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 7ddaa44854620..bb2a620e93668 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -11,10 +11,7 @@ import ( "testing" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" - "github.com/golang/protobuf/proto" - emptypb "github.com/golang/protobuf/ptypes/empty" - googlepb "github.com/golang/protobuf/ptypes/timestamp" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -22,6 +19,9 @@ import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" ) // clientOpt is the option tests should use to connect to the test server. 
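The test hunks below make the matching move from the deprecated github.com/golang/protobuf timestamp type to timestamppb. A minimal sketch of building a TimeInterval with the new type (illustrative; not code from this PR):

```go
package main

import (
	"fmt"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// The generated monitoring structs now take *timestamppb.Timestamp
	// directly, and timestamppb adds helpers such as AsTime().
	interval := &monitoringpb.TimeInterval{
		EndTime: &timestamppb.Timestamp{Seconds: 1},
	}
	fmt.Println(interval.GetEndTime().AsTime().UTC()) // 1970-01-01 00:00:01 +0000 UTC
}
```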
@@ -181,7 +181,7 @@ func TestWriteAscendingTime(t *testing.T) { ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1, }, }) @@ -196,7 +196,7 @@ func TestWriteAscendingTime(t *testing.T) { ts = request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 2, }, }) @@ -311,7 +311,7 @@ func TestWriteBatchable(t *testing.T) { ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 3, }, }) @@ -324,7 +324,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[1] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1, }, }) @@ -337,7 +337,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[2] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 3, }, }) @@ -350,7 +350,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[4] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 5, }, }) diff --git a/plugins/outputs/sumologic/README.md b/plugins/outputs/sumologic/README.md index 78f0eb3370a80..20fb757999a80 100644 --- a/plugins/outputs/sumologic/README.md +++ b/plugins/outputs/sumologic/README.md @@ -45,7 +45,7 @@ by Sumologic HTTP Source: ## Bear in mind that in some serializer a metric even though serialized to multiple ## lines cannot be split any further so setting this very low might not work ## as expected. - # max_request_body_size = 1_000_000 + # max_request_body_size = 1000000 ## Additional, Sumo specific options. ## Full list can be found here: diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 3c3f4a649705d..088210b9d1ff9 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -3,7 +3,6 @@ package sumologic import ( "bytes" "compress/gzip" - "context" "log" "net/http" "time" @@ -28,7 +27,7 @@ const ( ## Data format to be used for sending metrics. ## This will set the "Content-Type" header accordingly. - ## Currently supported formats: + ## Currently supported formats: ## * graphite - for Content-Type of application/vnd.sumologic.graphite ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus @@ -43,14 +42,14 @@ const ( ## Timeout used for HTTP request # timeout = "5s" - + ## Max HTTP request body size in bytes before compression (if applied). ## By default 1MB is recommended. ## NOTE: ## Bear in mind that in some serializer a metric even though serialized to multiple ## lines cannot be split any further so setting this very low might not work ## as expected. - # max_request_body_size = 1_000_000 + # max_request_body_size = 1000000 ## Additional, Sumo specific options. 
## Full list can be found here: @@ -75,7 +74,7 @@ const ( defaultClientTimeout = 5 * time.Second defaultMethod = http.MethodPost - defaultMaxRequestBodySize = 1_000_000 + defaultMaxRequestBodySize = 1000000 contentTypeHeader = "Content-Type" carbon2ContentType = "application/vnd.sumologic.carbon2" @@ -93,9 +92,9 @@ const ( ) type SumoLogic struct { - URL string `toml:"url"` - Timeout internal.Duration `toml:"timeout"` - MaxRequstBodySize config.Size `toml:"max_request_body_size"` + URL string `toml:"url"` + Timeout config.Duration `toml:"timeout"` + MaxRequstBodySize config.Size `toml:"max_request_body_size"` SourceName string `toml:"source_name"` SourceHost string `toml:"source_host"` @@ -139,13 +138,13 @@ func (s *SumoLogic) SetSerializer(serializer serializers.Serializer) { s.serializer = serializer } -func (s *SumoLogic) createClient(ctx context.Context) (*http.Client, error) { +func (s *SumoLogic) createClient() *http.Client { return &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: s.Timeout.Duration, - }, nil + Timeout: time.Duration(s.Timeout), + } } func (s *SumoLogic) Connect() error { @@ -153,16 +152,11 @@ func (s *SumoLogic) Connect() error { return errors.Wrap(s.err, "sumologic: incorrect configuration") } - if s.Timeout.Duration == 0 { - s.Timeout.Duration = defaultClientTimeout + if s.Timeout == 0 { + s.Timeout = config.Duration(defaultClientTimeout) } - client, err := s.createClient(context.Background()) - if err != nil { - return err - } - - s.client = client + s.client = s.createClient() return nil } @@ -335,9 +329,7 @@ func setHeaderIfSetInConfig(r *http.Request, h header, value string) { func Default() *SumoLogic { return &SumoLogic{ - Timeout: internal.Duration{ - Duration: defaultClientTimeout, - }, + Timeout: config.Duration(defaultClientTimeout), MaxRequstBodySize: defaultMaxRequestBodySize, headers: make(map[string]string), } diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 48450ab450f3e..5629defa4506e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -6,7 +6,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -27,7 +26,7 @@ import ( ) func getMetric(t *testing.T) telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -35,15 +34,15 @@ func getMetric(t *testing.T) telegraf.Metric { }, time.Unix(0, 0), ) - require.NoError(t, err) return m } -func getMetrics(t *testing.T, count int) []telegraf.Metric { +func getMetrics(t *testing.T) []telegraf.Metric { + const count = 100 var metrics = make([]telegraf.Metric, count) for i := 0; i < count; i++ { - m, err := metric.New( + m := metric.New( fmt.Sprintf("cpu-%d", i), map[string]string{ "ec2_instance": "aws-129038123", @@ -58,7 +57,6 @@ func getMetrics(t *testing.T, count int) []telegraf.Metric { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics[i] = m } return metrics @@ -95,7 +93,7 @@ func TestMethod(t *testing.T) { w.WriteHeader(http.StatusOK) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin := tt.plugin() @@ -172,7 +170,7 @@ func TestStatusCode(t *testing.T) { w.WriteHeader(tt.statusCode) }) - serializer, err := 
carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) tt.plugin.SetSerializer(serializer) @@ -198,7 +196,7 @@ func TestContentType(t *testing.T) { s.headers = map[string]string{ contentTypeHeader: carbon2ContentType, } - sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) s.SetSerializer(sr) return s @@ -212,7 +210,7 @@ func TestContentType(t *testing.T) { s.headers = map[string]string{ contentTypeHeader: carbon2ContentType, } - sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatMetricIncludesField)) + sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatMetricIncludesField), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) s.SetSerializer(sr) return s @@ -301,7 +299,7 @@ func TestContentEncodingGzip(t *testing.T) { body, err := gzip.NewReader(r.Body) require.NoError(t, err) - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n") @@ -309,7 +307,7 @@ func TestContentEncodingGzip(t *testing.T) { w.WriteHeader(http.StatusNoContent) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin := tt.plugin() @@ -344,7 +342,7 @@ func TestDefaultUserAgent(t *testing.T) { MaxRequstBodySize: Default().MaxRequstBodySize, } - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin.SetSerializer(serializer) @@ -450,8 +448,6 @@ func TestMaxRequestBodySize(t *testing.T) { u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) require.NoError(t, err) - const count = 100 - testcases := []struct { name string plugin func() *SumoLogic @@ -479,7 +475,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.URL = u.String() return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 1, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -494,7 +490,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 43_749 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 2, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -507,7 +503,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 10_000 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 5, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -520,7 +516,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 5_000 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 10, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, 
user, temp) = 500 @@ -533,7 +529,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 2_500 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 20, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -546,7 +542,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 1_000 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 50, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -559,7 +555,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 500 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 100, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -572,7 +568,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 300 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 100, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -595,7 +591,7 @@ func TestMaxRequestBodySize(t *testing.T) { w.WriteHeader(http.StatusOK) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin := tt.plugin() @@ -627,7 +623,7 @@ func TestTryingToSendEmptyMetricsDoesntFail(t *testing.T) { plugin := Default() plugin.URL = u.String() - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin.SetSerializer(serializer) diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 1b46d02e210b0..570ed15a79e6b 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -7,11 +7,12 @@ import ( "net" "strconv" "strings" + "time" - "github.com/influxdata/go-syslog/v2/nontransparent" - "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/go-syslog/v3/nontransparent" + "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -19,7 +20,7 @@ import ( type Syslog struct { Address string - KeepAlivePeriod *internal.Duration + KeepAlivePeriod *config.Duration DefaultSdid string DefaultSeverityCode uint8 DefaultFacilityCode uint8 @@ -149,13 +150,13 @@ func (s *Syslog) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(s.Address, "://", 2)[0]) } - if s.KeepAlivePeriod.Duration == 0 { + if *s.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*s.KeepAlivePeriod)) } func (s *Syslog) Close() error { diff --git 
a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go index 4e4848205ca28..28c74f3f97a6d 100644 --- a/plugins/outputs/syslog/syslog_mapper.go +++ b/plugins/outputs/syslog/syslog_mapper.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/influxdata/telegraf" ) diff --git a/plugins/outputs/syslog/syslog_mapper_test.go b/plugins/outputs/syslog/syslog_mapper_test.go index 300d5fcabe561..d4bbc1d6f0ed9 100644 --- a/plugins/outputs/syslog/syslog_mapper_test.go +++ b/plugins/outputs/syslog/syslog_mapper_test.go @@ -15,7 +15,7 @@ func TestSyslogMapperWithDefaults(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{}, map[string]interface{}{}, @@ -34,7 +34,7 @@ func TestSyslogMapperWithHostname(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "hostname": "testhost", @@ -54,7 +54,7 @@ func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "source": "sourcevalue", @@ -74,7 +74,7 @@ func TestSyslogMapperWithHostnameHostFallback(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "host": "hostvalue", @@ -94,7 +94,7 @@ func TestSyslogMapperWithDefaultSdid(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "appname": "testapp", @@ -130,7 +130,7 @@ func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "appname": "testapp", @@ -167,7 +167,7 @@ func TestSyslogMapperWithNoSdids(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "appname": "testapp", diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go index 7581a7b5380d5..d9e082e5f9042 100644 --- a/plugins/outputs/syslog/syslog_test.go +++ b/plugins/outputs/syslog/syslog_test.go @@ -20,7 +20,7 @@ func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "hostname": "testhost", @@ -44,7 +44,7 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { s.Framing = framing.NonTransparent // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "hostname": "testhost", @@ -92,7 +92,7 @@ func TestSyslogWriteWithUdp(t *testing.T) { func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { metrics := []telegraf.Metric{} - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{}, map[string]interface{}{}, @@ -116,7 +116,7 @@ func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) { s.Framing = framing.NonTransparent metrics := []telegraf.Metric{} - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{}, map[string]interface{}{}, diff --git a/plugins/outputs/timestream/README.md b/plugins/outputs/timestream/README.md new 
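The mapper and output test hunks above track another upstream API change: `metric.New` no longer returns an error, so the two-value assignment `m1, _ := metric.New(...)` collapses to a plain assignment. A minimal sketch of the single-return usage, with illustrative values:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// metric.New now returns only the metric; the second (error) return
	// value was removed, which is why the tests drop the blank identifier.
	m := metric.New(
		"testmetric",
		map[string]string{"hostname": "testhost"},
		map[string]interface{}{"value": 42.0},
		time.Unix(0, 0),
	)
	fmt.Println(m.Name(), m.Tags(), m.Fields())
}
```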
file mode 100644
index 0000000000000..dc063a06854d3
--- /dev/null
+++ b/plugins/outputs/timestream/README.md
@@ -0,0 +1,155 @@
+# Timestream Output Plugin
+
+The Timestream output plugin writes metrics to the [Amazon Timestream] service.
+
+### Configuration
+
+```toml
+# Configuration for sending metrics to Amazon Timestream.
+[[outputs.timestream]]
+  ## Amazon Region
+  region = "us-east-1"
+
+  ## Amazon Credentials
+  ## Credentials are loaded in the following order:
+  ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+  ## 2) Assumed credentials via STS if role_arn is specified
+  ## 3) explicit credentials from 'access_key' and 'secret_key'
+  ## 4) shared profile from 'profile'
+  ## 5) environment variables
+  ## 6) shared credentials file
+  ## 7) EC2 Instance Profile
+  #access_key = ""
+  #secret_key = ""
+  #token = ""
+  #role_arn = ""
+  #web_identity_token_file = ""
+  #role_session_name = ""
+  #profile = ""
+  #shared_credential_file = ""
+
+  ## Endpoint to make request against, the correct endpoint is automatically
+  ## determined and this option should only be set if you wish to override the
+  ## default.
+  ## ex: endpoint_url = "http://localhost:8000"
+  # endpoint_url = ""
+
+  ## Timestream database where the metrics will be inserted.
+  ## The database must exist prior to starting Telegraf.
+  database_name = "yourDatabaseNameHere"
+
+  ## Specifies if the plugin should describe the Timestream database upon starting
+  ## to validate that it has the necessary permissions, connectivity, etc., as a safety check.
+  ## If the describe operation fails, the plugin will not start
+  ## and therefore the Telegraf agent will not start.
+  describe_database_on_start = false
+
+  ## The mapping mode specifies how Telegraf records are represented in Timestream.
+  ## Valid values are: single-table, multi-table.
+  ## For example, consider the following data in line protocol format:
+  ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+  ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+  ## where weather and airquality are the measurement names, location and season are tags,
+  ## and temperature, humidity, no2, pm25 are fields.
+  ## In multi-table mode:
+  ##  - the first line will be ingested into a table named weather
+  ##  - the second line will be ingested into a table named airquality
+  ##  - the tags will be represented as dimensions
+  ##  - the first table (weather) will have two records:
+  ##      one with measure_name equal to temperature,
+  ##      another with measure_name equal to humidity
+  ##  - the second table (airquality) will have two records:
+  ##      one with measure_name equal to no2,
+  ##      another with measure_name equal to pm25
+  ##  - the Timestream tables from the example will look like this:
+  ##      TABLE "weather":
+  ##        time | location | season | measure_name | measure_value::bigint
+  ##        2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+  ##        2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+  ##      TABLE "airquality":
+  ##        time | location | measure_name | measure_value::bigint
+  ##        2016-06-13 17:43:50 | us-west | no2 | 5
+  ##        2016-06-13 17:43:50 | us-west | pm25 | 16
+  ## In single-table mode:
+  ##  - the data will be ingested into a single table, whose name will be valueOf(single_table_name)
+  ##  - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+  ##  - location and season will be represented as dimensions
+  ##  - temperature, humidity, no2, pm25 will be represented as measure_name values
+  ##  - the Timestream table from the example will look like this:
+  ##      Assuming:
+  ##        - single_table_name = "my_readings"
+  ##        - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+  ##      TABLE "my_readings":
+  ##        time | location | season | namespace | measure_name | measure_value::bigint
+  ##        2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+  ##        2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+  ##        2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+  ##        2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+  ## In most cases, using the multi-table mapping mode is recommended.
+  ## However, you can consider using single-table when you have thousands of measurement names.
+  mapping_mode = "multi-table"
+
+  ## Only valid and required for mapping_mode = "single-table"
+  ## Specifies the Timestream table where the metrics will be uploaded.
+  # single_table_name = "yourTableNameHere"
+
+  ## Only valid and required for mapping_mode = "single-table"
+  ## Describes what will be the Timestream dimension name for the Telegraf
+  ## measurement name.
+  # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+
+  ## Specifies if the plugin should create the table if it does not exist.
+  ## The plugin writes the data without first checking if the table exists.
+  ## When the table does not exist, the error returned from Timestream will cause
+  ## the plugin to create the table, if this parameter is set to true.
+  create_table_if_not_exists = true
+
+  ## Only valid and required if create_table_if_not_exists = true
+  ## Specifies the Timestream table magnetic store retention period in days.
+  ## Check Timestream documentation for more details.
+  create_table_magnetic_store_retention_period_in_days = 365
+
+  ## Only valid and required if create_table_if_not_exists = true
+  ## Specifies the Timestream table memory store retention period in hours.
+  ## Check Timestream documentation for more details.
+  create_table_memory_store_retention_period_in_hours = 24
+
+  ## Only valid and optional if create_table_if_not_exists = true
+  ## Specifies the Timestream table tags.
+  ## Check Timestream documentation for more details.
+  # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+```
+
+### Batching
+
+Timestream `WriteRecordsInput.CommonAttributes` are used to write data to Timestream efficiently.
+
+### Multithreading
+
+A single thread is used to write the data to Timestream, following the general plugin design pattern.
+
+### Errors
+
+If the plugin attempts to write a Telegraf field type that is not supported by Timestream, the field is dropped and an error is emitted to the logs.
+
+In case of receiving a ThrottlingException or InternalServerException from Timestream, the errors are returned to Telegraf, in which case Telegraf will keep the metrics in its buffer and retry writing those metrics on the next flush.
+
+In case of receiving a ResourceNotFoundException:
+ - If the `create_table_if_not_exists` configuration is set to `true`, the plugin will try to create the appropriate table and, if the table creation succeeds, write the records again.
+ - If the `create_table_if_not_exists` configuration is set to `false`, the records are dropped, and an error is emitted to the logs.
+
+In case of receiving any other AWS error from Timestream, the records are dropped, and an error is emitted to the logs, as retrying such requests isn't likely to succeed.
+
+### Logging
+
+Turn on the debug flag in Telegraf to enable detailed logging (including the records being written to Timestream).
+
+### Testing
+
+Execute unit tests with:
+
+```
+go test -v ./plugins/outputs/timestream/...
+```
+
+[Amazon Timestream]: https://aws.amazon.com/timestream/
\ No newline at end of file
diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go
new file mode 100644
index 0000000000000..6478563b6b245
--- /dev/null
+++ b/plugins/outputs/timestream/timestream.go
@@ -0,0 +1,601 @@
+package timestream
+
+import (
+    "context"
+    "encoding/binary"
+    "errors"
+    "fmt"
+    "hash/fnv"
+    "reflect"
+    "strconv"
+    "time"
+
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/outputs"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/service/timestreamwrite"
+    "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types"
+    "github.com/aws/smithy-go"
+    internalaws "github.com/influxdata/telegraf/config/aws"
+)
+
+type (
+    Timestream struct {
+        MappingMode             string `toml:"mapping_mode"`
+        DescribeDatabaseOnStart bool   `toml:"describe_database_on_start"`
+        DatabaseName            string `toml:"database_name"`
+
+        SingleTableName                                    string `toml:"single_table_name"`
+        SingleTableDimensionNameForTelegrafMeasurementName string `toml:"single_table_dimension_name_for_telegraf_measurement_name"`
+
+        CreateTableIfNotExists                        bool              `toml:"create_table_if_not_exists"`
+        CreateTableMagneticStoreRetentionPeriodInDays int64             `toml:"create_table_magnetic_store_retention_period_in_days"`
+        CreateTableMemoryStoreRetentionPeriodInHours  int64             `toml:"create_table_memory_store_retention_period_in_hours"`
+        CreateTableTags                               map[string]string `toml:"create_table_tags"`
+
+        Log telegraf.Logger
+        svc WriteClient
+
+        internalaws.CredentialConfig
+    }
+
+    WriteClient interface {
+        CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error)
+        WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error)
+        DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error)
+    }
+)
+
+// Mapping modes specify how the Telegraf model should be represented in the Timestream model.
+// See the sample config for more details.
+const (
+    MappingModeSingleTable = "single-table"
+    MappingModeMultiTable  = "multi-table"
+)
+
+// MaxRecordsPerCall reflects the Timestream limit on records per WriteRecords API call.
+const MaxRecordsPerCall = 100
+
+var sampleConfig = `
+  ## Amazon Region
+  region = "us-east-1"
+
+  ## Amazon Credentials
+  ## Credentials are loaded in the following order:
+  ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+  ## 2) Assumed credentials via STS if role_arn is specified
+  ## 3) explicit credentials from 'access_key' and 'secret_key'
+  ## 4) shared profile from 'profile'
+  ## 5) environment variables
+  ## 6) shared credentials file
+  ## 7) EC2 Instance Profile
+  #access_key = ""
+  #secret_key = ""
+  #token = ""
+  #role_arn = ""
+  #web_identity_token_file = ""
+  #role_session_name = ""
+  #profile = ""
+  #shared_credential_file = ""
+
+  ## Endpoint to make request against, the correct endpoint is automatically
+  ## determined and this option should only be set if you wish to override the
+  ## default.
+  ## ex: endpoint_url = "http://localhost:8000"
+  # endpoint_url = ""
+
+  ## Timestream database where the metrics will be inserted.
+  ## The database must exist prior to starting Telegraf.
+  database_name = "yourDatabaseNameHere"
+
+  ## Specifies if the plugin should describe the Timestream database upon starting
+  ## to validate that it has the necessary permissions, connectivity, etc., as a safety check.
+  ## If the describe operation fails, the plugin will not start
+  ## and therefore the Telegraf agent will not start.
+  describe_database_on_start = false
+
+  ## The mapping mode specifies how Telegraf records are represented in Timestream.
+  ## Valid values are: single-table, multi-table.
+  ## For example, consider the following data in line protocol format:
+  ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+  ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+  ## where weather and airquality are the measurement names, location and season are tags,
+  ## and temperature, humidity, no2, pm25 are fields.
+  ## In multi-table mode:
+  ##  - the first line will be ingested into a table named weather
+  ##  - the second line will be ingested into a table named airquality
+  ##  - the tags will be represented as dimensions
+  ##  - the first table (weather) will have two records:
+  ##      one with measure_name equal to temperature,
+  ##      another with measure_name equal to humidity
+  ##  - the second table (airquality) will have two records:
+  ##      one with measure_name equal to no2,
+  ##      another with measure_name equal to pm25
+  ##  - the Timestream tables from the example will look like this:
+  ##      TABLE "weather":
+  ##        time | location | season | measure_name | measure_value::bigint
+  ##        2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+  ##        2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+  ##      TABLE "airquality":
+  ##        time | location | measure_name | measure_value::bigint
+  ##        2016-06-13 17:43:50 | us-west | no2 | 5
+  ##        2016-06-13 17:43:50 | us-west | pm25 | 16
+  ## In single-table mode:
+  ##  - the data will be ingested into a single table, whose name will be valueOf(single_table_name)
+  ##  - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+  ##  - location and season will be represented as dimensions
+  ##  - temperature, humidity, no2, pm25 will be represented as measure_name values
+  ##  - the Timestream table from the example will look like this:
+  ##      Assuming:
+  ##        - single_table_name = "my_readings"
+  ##        - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+  ##      TABLE "my_readings":
+  ##        time | location | season | namespace | measure_name | measure_value::bigint
+  ##        2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+  ##        2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+  ##        2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+  ##        2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+  ## In most cases, using the multi-table mapping mode is recommended.
+  ## However, you can consider using single-table when you have thousands of measurement names.
+  mapping_mode = "multi-table"
+
+  ## Only valid and required for mapping_mode = "single-table"
+  ## Specifies the Timestream table where the metrics will be uploaded.
+  # single_table_name = "yourTableNameHere"
+
+  ## Only valid and required for mapping_mode = "single-table"
+  ## Describes what will be the Timestream dimension name for the Telegraf
+  ## measurement name.
+  # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+
+  ## Specifies if the plugin should create the table if it does not exist.
+  ## The plugin writes the data without first checking if the table exists.
+  ## When the table does not exist, the error returned from Timestream will cause
+  ## the plugin to create the table, if this parameter is set to true.
+  create_table_if_not_exists = true
+
+  ## Only valid and required if create_table_if_not_exists = true
+  ## Specifies the Timestream table magnetic store retention period in days.
+  ## Check Timestream documentation for more details.
+  create_table_magnetic_store_retention_period_in_days = 365
+
+  ## Only valid and required if create_table_if_not_exists = true
+  ## Specifies the Timestream table memory store retention period in hours.
+  ## Check Timestream documentation for more details.
+  create_table_memory_store_retention_period_in_hours = 24
+
+  ## Only valid and optional if create_table_if_not_exists = true
+  ## Specifies the Timestream table tags.
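(An editorial aside; the sample config continues below.) The mapping description above may be easier to see in terms of the AWS SDK v2 types the plugin is built on. A rough sketch of what the weather line from the example becomes in multi-table mode; the seconds-granularity timestamp and the measure value types are simplified here for readability:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/timestreamwrite"
	"github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types"
)

func main() {
	// One WriteRecordsInput per measurement/tag-set/timestamp: tags become
	// common dimensions, and each field becomes one record. Line-protocol
	// numeric fields are floats, so convertValue maps them to DOUBLE.
	input := &timestreamwrite.WriteRecordsInput{
		DatabaseName: aws.String("yourDatabaseNameHere"),
		TableName:    aws.String("weather"), // one table per measurement
		CommonAttributes: &types.Record{
			Dimensions: []types.Dimension{
				{Name: aws.String("location"), Value: aws.String("us-midwest")},
				{Name: aws.String("season"), Value: aws.String("summer")},
			},
			Time:     aws.String("1465839830"),
			TimeUnit: types.TimeUnitSeconds,
		},
		Records: []types.Record{
			{MeasureName: aws.String("temperature"), MeasureValue: aws.String("82"), MeasureValueType: types.MeasureValueTypeDouble},
			{MeasureName: aws.String("humidity"), MeasureValue: aws.String("71"), MeasureValueType: types.MeasureValueTypeDouble},
		},
	}
	fmt.Printf("%+v\n", input)
}
```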
+ ## Check Timestream documentation for more details + # create_table_tags = { "foo" = "bar", "environment" = "dev"} +` + +// WriteFactory function provides a way to mock the client instantiation for testing purposes. +var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + cfg, err := credentialConfig.Credentials() + if err != nil { + return ×treamwrite.Client{}, err + } + return timestreamwrite.NewFromConfig(cfg), nil +} + +func (t *Timestream) Connect() error { + if t.DatabaseName == "" { + return fmt.Errorf("DatabaseName key is required") + } + + if t.MappingMode == "" { + return fmt.Errorf("MappingMode key is required") + } + + if t.MappingMode != MappingModeSingleTable && t.MappingMode != MappingModeMultiTable { + return fmt.Errorf("correct MappingMode key values are: '%s', '%s'", + MappingModeSingleTable, MappingModeMultiTable) + } + + if t.MappingMode == MappingModeSingleTable { + if t.SingleTableName == "" { + return fmt.Errorf("in '%s' mapping mode, SingleTableName key is required", MappingModeSingleTable) + } + + if t.SingleTableDimensionNameForTelegrafMeasurementName == "" { + return fmt.Errorf("in '%s' mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required", + MappingModeSingleTable) + } + } + + if t.MappingMode == MappingModeMultiTable { + if t.SingleTableName != "" { + return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableName key", MappingModeMultiTable) + } + + if t.SingleTableDimensionNameForTelegrafMeasurementName != "" { + return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable) + } + } + + if t.CreateTableIfNotExists { + if t.CreateTableMagneticStoreRetentionPeriodInDays < 1 { + return fmt.Errorf("if Telegraf should create tables, CreateTableMagneticStoreRetentionPeriodInDays key should have a value greater than 0") + } + + if t.CreateTableMemoryStoreRetentionPeriodInHours < 1 { + return fmt.Errorf("if Telegraf should create tables, CreateTableMemoryStoreRetentionPeriodInHours key should have a value greater than 0") + } + } + + t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) + + svc, err := WriteFactory(&t.CredentialConfig) + if err != nil { + return err + } + + if t.DescribeDatabaseOnStart { + t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region) + + describeDatabaseInput := ×treamwrite.DescribeDatabaseInput{ + DatabaseName: aws.String(t.DatabaseName), + } + describeDatabaseOutput, err := svc.DescribeDatabase(context.Background(), describeDatabaseInput) + if err != nil { + t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName) + return err + } + t.Log.Infof("Describe database '%s' returned: '%s'.", t.DatabaseName, describeDatabaseOutput) + } + + t.svc = svc + return nil +} + +func (t *Timestream) Close() error { + return nil +} + +func (t *Timestream) SampleConfig() string { + return sampleConfig +} + +func (t *Timestream) Description() string { + return "Configuration for Amazon Timestream output." 
+} + +func init() { + outputs.Add("timestream", func() telegraf.Output { + return &Timestream{} + }) +} + +func (t *Timestream) Write(metrics []telegraf.Metric) error { + writeRecordsInputs := t.TransformMetrics(metrics) + for _, writeRecordsInput := range writeRecordsInputs { + if err := t.writeToTimestream(writeRecordsInput, true); err != nil { + return err + } + } + return nil +} + +func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteRecordsInput, resourceNotFoundRetry bool) error { + t.Log.Debugf("Writing to Timestream: '%v' with ResourceNotFoundRetry: '%t'", writeRecordsInput, resourceNotFoundRetry) + + _, err := t.svc.WriteRecords(context.Background(), writeRecordsInput) + if err != nil { + // Telegraf will retry ingesting the metrics if an error is returned from the plugin. + // Therefore, return error only for retryable exceptions: ThrottlingException and 5xx exceptions. + var notFound *types.ResourceNotFoundException + if errors.As(err, ¬Found) { + if resourceNotFoundRetry { + t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'", + t.DatabaseName, *writeRecordsInput.TableName, notFound) + return t.createTableAndRetry(writeRecordsInput) + } + t.logWriteToTimestreamError(notFound, writeRecordsInput.TableName) + } + + var rejected *types.RejectedRecordsException + if errors.As(err, &rejected) { + t.logWriteToTimestreamError(err, writeRecordsInput.TableName) + return nil + } + + var throttling *types.ThrottlingException + if errors.As(err, &throttling) { + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, throttling) + } + + var internal *types.InternalServerException + if errors.As(err, &internal) { + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, internal) + } + + var operation *smithy.OperationError + if !errors.As(err, &operation) { + // Retry other, non-aws errors. + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, err) + } + t.logWriteToTimestreamError(err, writeRecordsInput.TableName) + } + return nil +} + +func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) { + t.Log.Errorf("Failed to write to Timestream database '%s' table '%s'. Skipping metric! Error: '%s'", + t.DatabaseName, *tableName, err) +} + +func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error { + if t.CreateTableIfNotExists { + t.Log.Infof("Trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'true'.", *writeRecordsInput.TableName, t.DatabaseName) + if err := t.createTable(writeRecordsInput.TableName); err != nil { + t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err) + } else { + t.Log.Infof("Table '%s' in database '%s' created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName) + return t.writeToTimestream(writeRecordsInput, false) + } + } else { + t.Log.Errorf("Not trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName) + } + return nil +} + +// createTable creates a Timestream table according to the configuration. 
+func (t *Timestream) createTable(tableName *string) error { + createTableInput := ×treamwrite.CreateTableInput{ + DatabaseName: aws.String(t.DatabaseName), + TableName: aws.String(*tableName), + RetentionProperties: &types.RetentionProperties{ + MagneticStoreRetentionPeriodInDays: t.CreateTableMagneticStoreRetentionPeriodInDays, + MemoryStoreRetentionPeriodInHours: t.CreateTableMemoryStoreRetentionPeriodInHours, + }, + } + var tags []types.Tag + for key, val := range t.CreateTableTags { + tags = append(tags, types.Tag{ + Key: aws.String(key), + Value: aws.String(val), + }) + } + createTableInput.Tags = tags + + _, err := t.svc.CreateTable(context.Background(), createTableInput) + if err != nil { + if _, ok := err.(*types.ConflictException); ok { + // if the table was created in the meantime, it's ok. + return nil + } + return err + } + return nil +} + +// TransformMetrics transforms a collection of Telegraf Metrics into write requests to Timestream. +// Telegraf Metrics are grouped by Name, Tag Keys and Time to use Timestream CommonAttributes. +// Returns collection of write requests to be performed to Timestream. +func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwrite.WriteRecordsInput { + writeRequests := make(map[uint64]*timestreamwrite.WriteRecordsInput, len(metrics)) + for _, m := range metrics { + // build MeasureName, MeasureValue, MeasureValueType + records := t.buildWriteRecords(m) + if len(records) == 0 { + continue + } + id := hashFromMetricTimeNameTagKeys(m) + if curr, ok := writeRequests[id]; !ok { + // No current CommonAttributes/WriteRecordsInput found for current Telegraf Metric + dimensions := t.buildDimensions(m) + timeUnit, timeValue := getTimestreamTime(m.Time()) + newWriteRecord := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(t.DatabaseName), + Records: records, + CommonAttributes: &types.Record{ + Dimensions: dimensions, + Time: aws.String(timeValue), + TimeUnit: timeUnit, + }, + } + if t.MappingMode == MappingModeSingleTable { + newWriteRecord.TableName = &t.SingleTableName + } + if t.MappingMode == MappingModeMultiTable { + newWriteRecord.TableName = aws.String(m.Name()) + } + + writeRequests[id] = newWriteRecord + } else { + curr.Records = append(curr.Records, records...) + } + } + + // Create result as array of WriteRecordsInput. Split requests over records count limit to smaller requests. 
+    var result []*timestreamwrite.WriteRecordsInput
+    for _, writeRequest := range writeRequests {
+        if len(writeRequest.Records) > MaxRecordsPerCall {
+            for _, recordsPartition := range partitionRecords(MaxRecordsPerCall, writeRequest.Records) {
+                newWriteRecord := &timestreamwrite.WriteRecordsInput{
+                    DatabaseName:     writeRequest.DatabaseName,
+                    TableName:        writeRequest.TableName,
+                    Records:          recordsPartition,
+                    CommonAttributes: writeRequest.CommonAttributes,
+                }
+                result = append(result, newWriteRecord)
+            }
+        } else {
+            result = append(result, writeRequest)
+        }
+    }
+    return result
+}
+
+func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 {
+    h := fnv.New64a()
+    h.Write([]byte(m.Name()))
+    h.Write([]byte("\n"))
+    for _, tag := range m.TagList() {
+        if tag.Key == "" {
+            continue
+        }
+
+        h.Write([]byte(tag.Key))
+        h.Write([]byte("\n"))
+        h.Write([]byte(tag.Value))
+        h.Write([]byte("\n"))
+    }
+    b := make([]byte, binary.MaxVarintLen64)
+    n := binary.PutUvarint(b, uint64(m.Time().UnixNano()))
+    h.Write(b[:n])
+    h.Write([]byte("\n"))
+    return h.Sum64()
+}
+
+func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension {
+    var dimensions []types.Dimension
+    for tagName, tagValue := range point.Tags() {
+        dimension := types.Dimension{
+            Name:  aws.String(tagName),
+            Value: aws.String(tagValue),
+        }
+        dimensions = append(dimensions, dimension)
+    }
+    if t.MappingMode == MappingModeSingleTable {
+        dimension := types.Dimension{
+            Name:  aws.String(t.SingleTableDimensionNameForTelegrafMeasurementName),
+            Value: aws.String(point.Name()),
+        }
+        dimensions = append(dimensions, dimension)
+    }
+    return dimensions
+}
+
+// buildWriteRecords builds the Timestream write records from Metric Fields only.
+// Tags and time are not included; common attributes are built separately.
+// Records with an unsupported Metric Field type are skipped.
+// It returns an array of Timestream write records.
+func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record {
+    var records []types.Record
+    for fieldName, fieldValue := range point.Fields() {
+        stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
+        if !ok {
+            t.Log.Errorf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+
+                "Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string]",
+                fieldName, reflect.TypeOf(fieldValue))
+            continue
+        }
+        record := types.Record{
+            MeasureName:      aws.String(fieldName),
+            MeasureValueType: stringFieldValueType,
+            MeasureValue:     aws.String(stringFieldValue),
+        }
+        records = append(records, record)
+    }
+    return records
+}
+
+// partitionRecords splits the Timestream records into smaller slices of a max size
+// so that they are under the limit for the Timestream API call.
+// It returns an array of arrays of records.
+func partitionRecords(size int, records []types.Record) [][]types.Record {
+    numberOfPartitions := len(records) / size
+    if len(records)%size != 0 {
+        numberOfPartitions++
+    }
+
+    partitions := make([][]types.Record, numberOfPartitions)
+
+    for i := 0; i < numberOfPartitions; i++ {
+        start := size * i
+        end := size * (i + 1)
+        if end > len(records) {
+            end = len(records)
+        }
+
+        partitions[i] = records[start:end]
+    }
+
+    return partitions
+}
+
+// getTimestreamTime produces Timestream TimeUnit and TimeValue with minimum possible granularity
+// while maintaining the same information.
+func getTimestreamTime(t time.Time) (timeUnit types.TimeUnit, timeValue string) { + nanosTime := t.UnixNano() + if nanosTime%1e9 == 0 { + timeUnit = types.TimeUnitSeconds + timeValue = strconv.FormatInt(nanosTime/1e9, 10) + } else if nanosTime%1e6 == 0 { + timeUnit = types.TimeUnitMilliseconds + timeValue = strconv.FormatInt(nanosTime/1e6, 10) + } else if nanosTime%1e3 == 0 { + timeUnit = types.TimeUnitMicroseconds + timeValue = strconv.FormatInt(nanosTime/1e3, 10) + } else { + timeUnit = types.TimeUnitNanoseconds + timeValue = strconv.FormatInt(nanosTime, 10) + } + return +} + +// convertValue converts single Field value from Telegraf Metric and produces +// value, valueType Timestream representation. +func convertValue(v interface{}) (value string, valueType types.MeasureValueType, ok bool) { + ok = true + + switch t := v.(type) { + case int: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatInt(int64(t), 10) + case int8: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatInt(int64(t), 10) + case int16: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatInt(int64(t), 10) + case int32: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatInt(int64(t), 10) + case int64: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatInt(t, 10) + case uint: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatUint(uint64(t), 10) + case uint8: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatUint(uint64(t), 10) + case uint16: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatUint(uint64(t), 10) + case uint32: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatUint(uint64(t), 10) + case uint64: + valueType = types.MeasureValueTypeBigint + value = strconv.FormatUint(t, 10) + case float32: + valueType = types.MeasureValueTypeDouble + value = strconv.FormatFloat(float64(t), 'f', -1, 32) + case float64: + valueType = types.MeasureValueTypeDouble + value = strconv.FormatFloat(t, 'f', -1, 64) + case bool: + valueType = types.MeasureValueTypeBoolean + if t { + value = "true" + } else { + value = "false" + } + case string: + valueType = types.MeasureValueTypeVarchar + value = t + default: + // Skip unsupported type. 
+ ok = false + return + } + return +} diff --git a/plugins/outputs/timestream/timestream_internal_test.go b/plugins/outputs/timestream/timestream_internal_test.go new file mode 100644 index 0000000000000..d151c10d4b146 --- /dev/null +++ b/plugins/outputs/timestream/timestream_internal_test.go @@ -0,0 +1,91 @@ +package timestream + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" + + "github.com/stretchr/testify/assert" +) + +func TestGetTimestreamTime(t *testing.T) { + assertions := assert.New(t) + + tWithNanos := time.Date(2020, time.November, 10, 23, 44, 20, 123, time.UTC) + tWithMicros := time.Date(2020, time.November, 10, 23, 44, 20, 123000, time.UTC) + tWithMillis := time.Date(2020, time.November, 10, 23, 44, 20, 123000000, time.UTC) + tOnlySeconds := time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC) + + tUnitNanos, tValueNanos := getTimestreamTime(tWithNanos) + assertions.Equal(types.TimeUnitNanoseconds, tUnitNanos) + assertions.Equal("1605051860000000123", tValueNanos) + + tUnitMicros, tValueMicros := getTimestreamTime(tWithMicros) + assertions.Equal(types.TimeUnitMicroseconds, tUnitMicros) + assertions.Equal("1605051860000123", tValueMicros) + + tUnitMillis, tValueMillis := getTimestreamTime(tWithMillis) + assertions.Equal(types.TimeUnitMilliseconds, tUnitMillis) + assertions.Equal("1605051860123", tValueMillis) + + tUnitSeconds, tValueSeconds := getTimestreamTime(tOnlySeconds) + assertions.Equal(types.TimeUnitSeconds, tUnitSeconds) + assertions.Equal("1605051860", tValueSeconds) +} + +func TestPartitionRecords(t *testing.T) { + assertions := assert.New(t) + + testDatum := types.Record{ + MeasureName: aws.String("Foo"), + MeasureValueType: types.MeasureValueTypeDouble, + MeasureValue: aws.String("123"), + } + + var zeroDatum []types.Record + oneDatum := []types.Record{testDatum} + twoDatum := []types.Record{testDatum, testDatum} + threeDatum := []types.Record{testDatum, testDatum, testDatum} + + assertions.Equal([][]types.Record{}, partitionRecords(2, zeroDatum)) + assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]types.Record{twoDatum}, partitionRecords(2, twoDatum)) + assertions.Equal([][]types.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) +} + +func TestConvertValueSupported(t *testing.T) { + intInputValues := []interface{}{-1, int8(-2), int16(-3), int32(-4), int64(-5)} + intOutputValues := []string{"-1", "-2", "-3", "-4", "-5"} + intOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint} + testConvertValueSupportedCases(t, intInputValues, intOutputValues, intOutputValueTypes) + + uintInputValues := []interface{}{uint(1), uint8(2), uint16(3), uint32(4), uint64(5)} + uintOutputValues := []string{"1", "2", "3", "4", "5"} + uintOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint} + testConvertValueSupportedCases(t, uintInputValues, uintOutputValues, uintOutputValueTypes) + + otherInputValues := []interface{}{"foo", float32(22.123), 22.1234, true} + otherOutputValues := []string{"foo", "22.123", "22.1234", "true"} + otherOutputValueTypes := 
[]types.MeasureValueType{types.MeasureValueTypeVarchar, types.MeasureValueTypeDouble, types.MeasureValueTypeDouble, types.MeasureValueTypeBoolean} + testConvertValueSupportedCases(t, otherInputValues, otherOutputValues, otherOutputValueTypes) +} + +func TestConvertValueUnsupported(t *testing.T) { + assertions := assert.New(t) + _, _, ok := convertValue(time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC)) + assertions.False(ok, "Expected unsuccessful conversion") +} + +func testConvertValueSupportedCases(t *testing.T, + inputValues []interface{}, outputValues []string, outputValueTypes []types.MeasureValueType) { + assertions := assert.New(t) + for i, inputValue := range inputValues { + v, vt, ok := convertValue(inputValue) + assertions.Equal(true, ok, "Expected successful conversion") + assertions.Equal(outputValues[i], v, "Expected different string representation of converted value") + assertions.Equal(outputValueTypes[i], vt, "Expected different value type of converted value") + } +} diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go new file mode 100644 index 0000000000000..be61a06a15358 --- /dev/null +++ b/plugins/outputs/timestream/timestream_test.go @@ -0,0 +1,736 @@ +package timestream + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +const tsDbName = "testDb" + +const testSingleTableName = "SingleTableName" +const testSingleTableDim = "namespace" + +var time1 = time.Date(2009, time.November, 10, 22, 0, 0, 0, time.UTC) + +const time1Epoch = "1257890400" + +var time2 = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +const time2Epoch = "1257894000" + +const metricName1 = "metricName1" +const metricName2 = "metricName2" + +type mockTimestreamClient struct{} + +func (m *mockTimestreamClient) CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) { + return nil, nil +} +func (m *mockTimestreamClient) WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) { + return nil, nil +} +func (m *mockTimestreamClient) DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) { + return nil, fmt.Errorf("hello from DescribeDatabase") +} + +func TestConnectValidatesConfigParameters(t *testing.T) { + assertions := assert.New(t) + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + return &mockTimestreamClient{}, nil + } + // checking base arguments + noDatabaseName := Timestream{Log: testutil.Logger{}} + assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") + + noMappingMode := Timestream{ + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + assertions.Contains(noMappingMode.Connect().Error(), "MappingMode") + + incorrectMappingMode := Timestream{ + DatabaseName: tsDbName, + MappingMode: "foo", + Log: testutil.Logger{}, + } + assertions.Contains(incorrectMappingMode.Connect().Error(), "single-table") + + // 
multi-table arguments + validMappingModeMultiTable := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + Log: testutil.Logger{}, + } + assertions.Nil(validMappingModeMultiTable.Connect()) + + singleTableNameWithMultiTable := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + SingleTableName: testSingleTableName, + Log: testutil.Logger{}, + } + assertions.Contains(singleTableNameWithMultiTable.Connect().Error(), "SingleTableName") + + singleTableDimensionWithMultiTable := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, + Log: testutil.Logger{}, + } + assertions.Contains(singleTableDimensionWithMultiTable.Connect().Error(), + "SingleTableDimensionNameForTelegrafMeasurementName") + + // single-table arguments + noTableNameMappingModeSingleTable := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeSingleTable, + Log: testutil.Logger{}, + } + assertions.Contains(noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName") + + noDimensionNameMappingModeSingleTable := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeSingleTable, + SingleTableName: testSingleTableName, + Log: testutil.Logger{}, + } + assertions.Contains(noDimensionNameMappingModeSingleTable.Connect().Error(), + "SingleTableDimensionNameForTelegrafMeasurementName") + + validConfigurationMappingModeSingleTable := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeSingleTable, + SingleTableName: testSingleTableName, + SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, + Log: testutil.Logger{}, + } + assertions.Nil(validConfigurationMappingModeSingleTable.Connect()) + + // create table arguments + createTableNoMagneticRetention := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + CreateTableIfNotExists: true, + Log: testutil.Logger{}, + } + assertions.Contains(createTableNoMagneticRetention.Connect().Error(), + "CreateTableMagneticStoreRetentionPeriodInDays") + + createTableNoMemoryRetention := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + CreateTableIfNotExists: true, + CreateTableMagneticStoreRetentionPeriodInDays: 3, + Log: testutil.Logger{}, + } + assertions.Contains(createTableNoMemoryRetention.Connect().Error(), + "CreateTableMemoryStoreRetentionPeriodInHours") + + createTableValid := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + CreateTableIfNotExists: true, + CreateTableMagneticStoreRetentionPeriodInDays: 3, + CreateTableMemoryStoreRetentionPeriodInHours: 3, + Log: testutil.Logger{}, + } + assertions.Nil(createTableValid.Connect()) + + // describe table on start arguments + describeTableInvoked := Timestream{ + DatabaseName: tsDbName, + MappingMode: MappingModeMultiTable, + DescribeDatabaseOnStart: true, + Log: testutil.Logger{}, + } + assertions.Contains(describeTableInvoked.Connect().Error(), "hello from DescribeDatabase") +} + +type mockTimestreamErrorClient struct { + ErrorToReturnOnWriteRecords error +} + +func (m *mockTimestreamErrorClient) CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) { + return nil, nil +} +func (m *mockTimestreamErrorClient) WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) { + return nil, 
m.ErrorToReturnOnWriteRecords +} +func (m *mockTimestreamErrorClient) DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) { + return nil, nil +} + +func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { + assertions := assert.New(t) + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + return &mockTimestreamErrorClient{ + ErrorToReturnOnWriteRecords: &types.ThrottlingException{Message: aws.String("Throttling Test")}, + }, nil + } + + plugin := Timestream{ + MappingMode: MappingModeMultiTable, + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + assertions.NoError(plugin.Connect()) + input := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value": float64(1)}, + time1, + ) + + err := plugin.Write([]telegraf.Metric{input}) + + assertions.NotNil(err, "Expected an error to be returned to Telegraf, "+ + "so that the write will be retried by Telegraf later.") +} + +func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { + assertions := assert.New(t) + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + return &mockTimestreamErrorClient{ + ErrorToReturnOnWriteRecords: &types.RejectedRecordsException{Message: aws.String("RejectedRecords Test")}, + }, nil + } + + plugin := Timestream{ + MappingMode: MappingModeMultiTable, + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + assertions.NoError(plugin.Connect()) + input := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value": float64(1)}, + time1, + ) + + err := plugin.Write([]telegraf.Metric{input}) + + assertions.Nil(err, "Expected to silently swallow the RejectedRecordsException, "+ + "as retrying this error doesn't make sense.") +} + +func TestTransformMetricsSkipEmptyMetric(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{}, //no fields here + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag2": "value2"}, + map[string]interface{}{ + "value": float64(10), + }, + time1, + ) + input3 := testutil.MustMetric( + metricName1, + map[string]string{}, //record with no dimensions should appear in the results + map[string]interface{}{ + "value": float64(20), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value": "10"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{testSingleTableDim: metricName1}, + measureValues: map[string]string{"value": "20"}, + }) + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{input1, input2, input3}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag2": "value2"}, + measureValues: map[string]string{"value": "10"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{}, + measureValues: 
map[string]string{"value": "20"}, + }) + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{input1, input2, input3}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { + const maxRecordsInWriteRecordsCall = 100 + + var inputs []telegraf.Metric + for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ { + fieldName := "value_supported" + strconv.Itoa(i) + inputs = append(inputs, testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + fieldName: float64(10), + }, + time1, + )) + } + + resultFields := make(map[string]string) + for i := 1; i <= maxRecordsInWriteRecordsCall; i++ { + fieldName := "value_supported" + strconv.Itoa(i) + resultFields[fieldName] = "10" + } + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: resultFields, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, + }) + comparisonTest(t, MappingModeSingleTable, + inputs, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: resultFields, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, + }) + comparisonTest(t, MappingModeMultiTable, + inputs, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag2": "value2"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: 
map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag2": "value2"}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value2"}, + map[string]interface{}{ + "value_supported1": float64(20), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "20"}, + }) + + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value2"}, + measureValues: map[string]string{"value_supported1": "20"}, + }) + + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time2, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time2Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + 
tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time2Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time1, + ) + + expectedResultSingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) + + expectedResultMultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) +} + +func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTableMapping(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName2, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName2}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName2, + dimensions: map[string]string{"tag1": 
"value1"}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { + metricWithUnsupportedField := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_unsupported": time.Now(), + }, + time1, + ) + expectedResultSingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + + comparisonTest(t, MappingModeSingleTable, + []telegraf.Metric{metricWithUnsupportedField}, + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) + + expectedResultMultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + + comparisonTest(t, MappingModeMultiTable, + []telegraf.Metric{metricWithUnsupportedField}, + []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) +} + +func comparisonTest(t *testing.T, + mappingMode string, + telegrafMetrics []telegraf.Metric, + timestreamRecords []*timestreamwrite.WriteRecordsInput, +) { + var plugin Timestream + switch mappingMode { + case MappingModeSingleTable: + plugin = Timestream{ + MappingMode: mappingMode, + DatabaseName: tsDbName, + + SingleTableName: testSingleTableName, + SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, + Log: testutil.Logger{}, + } + case MappingModeMultiTable: + plugin = Timestream{ + MappingMode: mappingMode, + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + } + assertions := assert.New(t) + + result := plugin.TransformMetrics(telegrafMetrics) + + assertions.Equal(len(timestreamRecords), len(result), "The number of transformed records was expected to be different") + for _, tsRecord := range timestreamRecords { + assertions.True(arrayContains(result, tsRecord), "Expected that the list of requests to Timestream: \n%s\n\n "+ + "will contain request: \n%s\n\nUsed MappingMode: %s", result, tsRecord, mappingMode) + } +} + +func arrayContains( + array []*timestreamwrite.WriteRecordsInput, + element *timestreamwrite.WriteRecordsInput, +) bool { + sortWriteInputForComparison(*element) + + for _, a := range array { + sortWriteInputForComparison(*a) + + if reflect.DeepEqual(a, element) { + return true + } + } + return false +} + +func sortWriteInputForComparison(element timestreamwrite.WriteRecordsInput) { + // sort the records by MeasureName, as they are kept in an array, but the order of records doesn't matter + sort.Slice(element.Records, func(i, j int) bool { + return strings.Compare(*element.Records[i].MeasureName, *element.Records[j].MeasureName) < 0 + }) + // sort the dimensions in CommonAttributes + if element.CommonAttributes != nil { + sort.Slice(element.CommonAttributes.Dimensions, func(i, j int) bool { + return strings.Compare(*element.CommonAttributes.Dimensions[i].Name, + *element.CommonAttributes.Dimensions[j].Name) < 0 + }) + } + // sort the dimensions in Records + for _, r := range element.Records { + sort.Slice(r.Dimensions, func(i, j int) bool { + return strings.Compare(*r.Dimensions[i].Name, 
*r.Dimensions[j].Name) < 0
+		})
+	}
+}
+
+type SimpleInput struct {
+	t             string
+	tableName     string
+	dimensions    map[string]string
+	measureValues map[string]string
+}
+
+func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput {
+	var tsDimensions []types.Dimension
+	for k, v := range i.dimensions {
+		tsDimensions = append(tsDimensions, types.Dimension{
+			Name:  aws.String(k),
+			Value: aws.String(v),
+		})
+	}
+
+	var tsRecords []types.Record
+	for k, v := range i.measureValues {
+		tsRecords = append(tsRecords, types.Record{
+			MeasureName:      aws.String(k),
+			MeasureValue:     aws.String(v),
+			MeasureValueType: types.MeasureValueTypeDouble,
+		})
+	}
+
+	result := &timestreamwrite.WriteRecordsInput{
+		DatabaseName: aws.String(tsDbName),
+		TableName:    aws.String(i.tableName),
+		Records:      tsRecords,
+		CommonAttributes: &types.Record{
+			Dimensions: tsDimensions,
+			Time:       aws.String(i.t),
+			TimeUnit:   types.TimeUnitSeconds,
+		},
+	}
+
+	return result
+}
diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go
index b5996f6380a40..4d3027b1b5331 100644
--- a/plugins/outputs/warp10/warp10.go
+++ b/plugins/outputs/warp10/warp10.go
@@ -3,17 +3,18 @@ package warp10
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"log"
 	"math"
 	"net/http"
+	"net/url"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
 
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/plugins/common/tls"
 	"github.com/influxdata/telegraf/plugins/outputs"
 )
@@ -24,12 +25,12 @@ const (
 
 // Warp10 output plugin
 type Warp10 struct {
-	Prefix             string            `toml:"prefix"`
-	WarpURL            string            `toml:"warp_url"`
-	Token              string            `toml:"token"`
-	Timeout            internal.Duration `toml:"timeout"`
-	PrintErrorBody     bool              `toml:"print_error_body"`
-	MaxStringErrorSize int               `toml:"max_string_error_size"`
+	Prefix             string          `toml:"prefix"`
+	WarpURL            string          `toml:"warp_url"`
+	Token              string          `toml:"token"`
+	Timeout            config.Duration `toml:"timeout"`
+	PrintErrorBody     bool            `toml:"print_error_body"`
+	MaxStringErrorSize int             `toml:"max_string_error_size"`
 	client             *http.Client
 	tls.ClientConfig
 }
@@ -75,8 +76,8 @@ func (w *Warp10) createClient() (*http.Client, error) {
 		return nil, err
 	}
 
-	if w.Timeout.Duration == 0 {
-		w.Timeout.Duration = defaultClientTimeout
+	if w.Timeout == 0 {
+		w.Timeout = config.Duration(defaultClientTimeout)
 	}
 
 	client := &http.Client{
@@ -84,7 +85,7 @@ func (w *Warp10) createClient() (*http.Client, error) {
 			TLSClientConfig: tlsCfg,
 			Proxy:           http.ProxyFromEnvironment,
 		},
-		Timeout: w.Timeout.Duration,
+		Timeout: time.Duration(w.Timeout),
 	}
 
 	return client, nil
@@ -105,9 +106,7 @@ func (w *Warp10) Connect() error {
 func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric) string {
 	collectString := make([]string, 0)
 	for _, mm := range metrics {
-
 		for _, field := range mm.FieldList() {
-
 			metric := &MetricLine{
 				Metric:    fmt.Sprintf("%s%s", w.Prefix, mm.Name()+"."+field.Key),
 				Timestamp: mm.Time().UnixNano() / 1000,
@@ -155,7 +154,7 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error {
 
 	if resp.StatusCode != http.StatusOK {
 		if w.PrintErrorBody {
-			body, _ := ioutil.ReadAll(resp.Body)
+			body, _ := io.ReadAll(resp.Body)
 			return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize))
 		}
 
@@ -170,15 +169,16 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error {
 }
 
 func buildTags(tags []*telegraf.Tag) []string {
-
 	tagsString := make([]string, len(tags)+1)
 	indexSource := 0
 	for index, tag := range tags {
-		tagsString[index] = fmt.Sprintf("%s=%s", 
tag.Key, tag.Value) + key := url.QueryEscape(tag.Key) + value := url.QueryEscape(tag.Value) + tagsString[index] = fmt.Sprintf("%s=%s", key, value) indexSource = index } indexSource++ - tagsString[indexSource] = fmt.Sprintf("source=telegraf") + tagsString[indexSource] = "source=telegraf" sort.Strings(tagsString) return tagsString } @@ -214,10 +214,6 @@ func boolToString(inputBool bool) string { return strconv.FormatBool(inputBool) } -func uIntToString(inputNum uint64) string { - return strconv.FormatUint(inputNum, 10) -} - func floatToString(inputNum float64) string { return strconv.FormatFloat(inputNum, 'f', 6, 64) } diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go index 5b543b34c0d8b..3fd08055fbb02 100644 --- a/plugins/outputs/warp10/warp10_test.go +++ b/plugins/outputs/warp10/warp10_test.go @@ -1,7 +1,6 @@ package warp10 import ( - "fmt" "testing" "github.com/influxdata/telegraf/testutil" @@ -24,6 +23,22 @@ func TestWriteWarp10(t *testing.T) { require.Exactly(t, "1257894000000000// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", payload) } +func TestWriteWarp10EncodedTags(t *testing.T) { + w := Warp10{ + Prefix: "unit.test", + WarpURL: "http://localhost:8090", + Token: "WRITE", + } + + metrics := testutil.MockMetrics() + for _, metric := range metrics { + metric.AddTag("encoded{tag", "value1,value2") + } + + payload := w.GenWarp10Payload(metrics) + require.Exactly(t, "1257894000000000// unit.testtest1.value{encoded%7Btag=value1%2Cvalue2,source=telegraf,tag1=value1} 1.000000\n", payload) +} + func TestHandleWarp10Error(t *testing.T) { w := Warp10{ Prefix: "unit.test", @@ -44,7 +59,7 @@ func TestHandleWarp10Error(t *testing.T) { `, - Expected: fmt.Sprintf("Invalid token"), + Expected: "Invalid token", }, { Message: ` @@ -59,7 +74,7 @@ func TestHandleWarp10Error(t *testing.T) { `, - Expected: fmt.Sprintf("Token Expired"), + Expected: "Token Expired", }, { Message: ` @@ -74,7 +89,7 @@ func TestHandleWarp10Error(t *testing.T) { `, - Expected: fmt.Sprintf("Token revoked"), + Expected: "Token revoked", }, { Message: ` @@ -101,5 +116,4 @@ func TestHandleWarp10Error(t *testing.T) { payload := w.HandleError(handledError.Message, 511) require.Exactly(t, handledError.Expected, payload) } - } diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index 2daca328cd577..8439295bbe029 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -49,6 +49,12 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. #truncate_tags = false + + ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics + ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending + ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in + ## Telegraf. 
+ #immediate_flush = true ``` diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 523549fb127e2..3ad4e803b9f6a 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -13,23 +13,24 @@ import ( const maxTagLength = 254 type Wavefront struct { - Url string - Token string - Host string - Port int - Prefix string - SimpleFields bool - MetricSeparator string - ConvertPaths bool - ConvertBool bool - UseRegex bool - UseStrict bool - TruncateTags bool - SourceOverride []string - StringToNumber map[string][]map[string]float64 + URL string `toml:"url"` + Token string `toml:"token"` + Host string `toml:"host"` + Port int `toml:"port"` + Prefix string `toml:"prefix"` + SimpleFields bool `toml:"simple_fields"` + MetricSeparator string `toml:"metric_separator"` + ConvertPaths bool `toml:"convert_paths"` + ConvertBool bool `toml:"convert_bool"` + UseRegex bool `toml:"use_regex"` + UseStrict bool `toml:"use_strict"` + TruncateTags bool `toml:"truncate_tags"` + ImmediateFlush bool `toml:"immediate_flush"` + SourceOverride []string `toml:"source_override"` + StringToNumber map[string][]map[string]float64 `toml:"string_to_number"` sender wavefront.Sender - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` } // catch many of the invalid chars that could appear in a metric or tag name @@ -101,6 +102,12 @@ var sampleConfig = ` ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. #truncate_tags = false + ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics + ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending + ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in + ## Telegraf. 
+ #immediate_flush = true + ## Define a mapping, namespaced by metric prefix, from string values to numeric values ## deprecated in 1.9; use the enum processor plugin #[[outputs.wavefront.string_to_number.elasticsearch]] @@ -118,20 +125,23 @@ type MetricPoint struct { } func (w *Wavefront) Connect() error { - if len(w.StringToNumber) > 0 { w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead") } - if w.Url != "" { - w.Log.Debug("connecting over http/https using Url: %s", w.Url) + flushSeconds := 5 + if w.ImmediateFlush { + flushSeconds = 86400 // Set a very long flush interval if we're flushing directly + } + if w.URL != "" { + w.Log.Debug("connecting over http/https using Url: %s", w.URL) sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{ - Server: w.Url, + Server: w.URL, Token: w.Token, - FlushIntervalSeconds: 5, + FlushIntervalSeconds: flushSeconds, }) if err != nil { - return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Url: %s", w.Url) + return fmt.Errorf("could not create Wavefront Sender for Url: %s", w.URL) } w.sender = sender } else { @@ -139,10 +149,10 @@ func (w *Wavefront) Connect() error { sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{ Host: w.Host, MetricsPort: w.Port, - FlushIntervalSeconds: 5, + FlushIntervalSeconds: flushSeconds, }) if err != nil { - return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %q and Port: %d", w.Host, w.Port) + return fmt.Errorf("could not create Wavefront Sender for Host: %q and Port: %d", w.Host, w.Port) } w.sender = sender } @@ -157,15 +167,22 @@ func (w *Wavefront) Connect() error { } func (w *Wavefront) Write(metrics []telegraf.Metric) error { - for _, m := range metrics { for _, point := range w.buildMetrics(m) { err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) if err != nil { - return fmt.Errorf("Wavefront sending error: %s", err.Error()) + if isRetryable(err) { + return fmt.Errorf("wavefront sending error: %v", err) + } + w.Log.Errorf("non-retryable error during Wavefront.Write: %v", err) + w.Log.Debugf("Non-retryable metric data: Name: %v, Value: %v, Timestamp: %v, Source: %v, PointTags: %v ", point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) } } } + if w.ImmediateFlush { + w.Log.Debugf("Flushing batch of %d points", len(metrics)) + return w.sender.Flush() + } return nil } @@ -214,7 +231,6 @@ func (w *Wavefront) buildMetrics(m telegraf.Metric) []*MetricPoint { } func (w *Wavefront) buildTags(mTags map[string]string) (string, map[string]string) { - // Remove all empty tags. 
 	for k, v := range mTags {
 		if v == "" {
@@ -287,9 +303,8 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) {
 		if w.ConvertBool {
 			if p {
 				return 1, nil
-			} else {
-				return 0, nil
 			}
+			return 0, nil
 		}
 	case int64:
 		return float64(v.(int64)), nil
@@ -301,7 +316,7 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) {
 	for prefix, mappings := range w.StringToNumber {
 		if strings.HasPrefix(name, prefix) {
 			for _, mapping := range mappings {
-				val, hasVal := mapping[string(p)]
+				val, hasVal := mapping[p]
 				if hasVal {
 					return val, nil
 				}
@@ -336,6 +351,25 @@ func init() {
 			ConvertPaths:   true,
 			ConvertBool:    true,
 			TruncateTags:   false,
+			ImmediateFlush: true,
 		}
 	})
 }
+
+// TODO: Currently there's no canonical way to exhaust all
+// retryable/non-retryable errors from wavefront, so this implementation just
+// handles known non-retryable errors on a case-by-case basis and assumes all
+// other errors are retryable.
+// A support ticket has been filed against wavefront to provide a canonical way
+// to distinguish between retryable and non-retryable errors (link is not
+// public).
+func isRetryable(err error) bool {
+	if err != nil {
+		// "empty metric name" errors are non-retryable, as retrying will just
+		// hit the same error again and again.
+		if strings.Contains(err.Error(), "empty metric name") {
+			return false
+		}
+	}
+	return true
+}
diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go
index 40707e6d6c8b0..d745108dc7e94 100644
--- a/plugins/outputs/wavefront/wavefront_test.go
+++ b/plugins/outputs/wavefront/wavefront_test.go
@@ -1,14 +1,15 @@
 package wavefront
 
 import (
-	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/metric"
-	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/require"
 	"reflect"
 	"strings"
 	"testing"
 	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
 )
 
 // default config used by Tests
@@ -32,7 +33,7 @@ func TestBuildMetrics(t *testing.T) {
 
 	pathReplacer = strings.NewReplacer("_", w.MetricSeparator)
 
-	testMetric1, _ := metric.New(
+	testMetric1 := metric.New(
 		"test.simple.metric",
 		map[string]string{"tag1": "value1", "host": "testHost"},
 		map[string]interface{}{"value": 123},
@@ -73,7 +74,6 @@ func TestBuildMetrics(t *testing.T) {
 			}
 		}
 	}
-
 }
 
 func TestBuildMetricsStrict(t *testing.T) {
@@ -113,7 +113,6 @@ func TestBuildMetricsStrict(t *testing.T) {
 			}
 		}
 	}
-
 }
 
 func TestBuildMetricsWithSimpleFields(t *testing.T) {
@@ -123,7 +122,7 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) {
 
 	pathReplacer = strings.NewReplacer("_", w.MetricSeparator)
 
-	testMetric1, _ := metric.New(
+	testMetric1 := metric.New(
 		"test.simple.metric",
 		map[string]string{"tag1": "value1"},
 		map[string]interface{}{"value": 123},
@@ -152,11 +151,9 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) {
 			}
 		}
 	}
-
 }
 
 func TestBuildTags(t *testing.T) {
-
 	w := defaultWavefront()
 
 	var tagtests = []struct {
@@ -284,7 +281,6 @@ func TestBuildValue(t *testing.T) {
 			t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", vt.out, value)
 		}
 	}
-
 }
 
 func TestBuildValueString(t *testing.T) {
@@ -315,7 +311,6 @@ func TestBuildValueString(t *testing.T) {
 			t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", vt.out, value)
 		}
 	}
-
 }
 
 func TestTagLimits(t *testing.T) {
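The `immediate_flush` behavior described in the README above comes down to two pieces: pick an effectively infinite `FlushIntervalSeconds` so the SDK's background flusher never fires, then call `Flush()` by hand after each batch. A minimal standalone sketch, assuming the `wavefront-sdk-go` senders API the plugin uses (the import path, host, and port here are illustrative assumptions):

```go
package main

import (
	"log"
	"time"

	wavefront "github.com/wavefronthq/wavefront-sdk-go/senders" // assumed import path
)

func main() {
	// Effectively disable background flushing, mirroring what Connect()
	// does when ImmediateFlush is set.
	sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{
		Host:                 "localhost", // placeholder proxy host
		MetricsPort:          2878,        // placeholder metrics port
		FlushIntervalSeconds: 86400,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer sender.Close()

	// Send one batch, then flush synchronously, as Write() does.
	if err := sender.SendMetric("cpu.load", 0.42, time.Now().Unix(), "myhost", nil); err != nil {
		log.Fatal(err)
	}
	if err := sender.Flush(); err != nil {
		log.Fatal(err)
	}
}
```

The trade-off is the one the README states: each batch blocks until sent, and buffering moves from the SDK into Telegraf's own output buffer.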
diff --git a/plugins/outputs/websocket/README.md b/plugins/outputs/websocket/README.md
new file mode 100644
index 0000000000000..577c10e6b0083
--- /dev/null
+++ b/plugins/outputs/websocket/README.md
@@ -0,0 +1,39 @@
+# Websocket Output Plugin
+
+This plugin can write to a WebSocket endpoint.
+
+It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
+
+### Configuration:
+
+```toml
+# A plugin that can transmit metrics over WebSocket.
+[[outputs.websocket]]
+  ## URL is the address to send metrics to. Make sure ws or wss scheme is used.
+  url = "ws://127.0.0.1:3000/telegraf"
+
+  ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
+  # connect_timeout = "30s"
+  # write_timeout = "30s"
+  # read_timeout = "30s"
+
+  ## Optionally turn on using text data frames (binary by default).
+  # use_text_frames = false
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  # data_format = "influx"
+
+  ## Additional HTTP Upgrade headers
+  # [outputs.websocket.headers]
+  #   Authorization = "Bearer <TOKEN>"
+```
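Outside of Telegraf, the plugin introduced in websocket.go below can be exercised directly; a minimal sketch (the URL, header value, serializer choice, and use of the `testutil.MockMetrics` helper are illustrative assumptions, not part of this change):

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/plugins/outputs/websocket"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	w := &websocket.WebSocket{
		URL:     "ws://127.0.0.1:3000/telegraf", // ws:// or wss:// is required by Init()
		Headers: map[string]string{"Authorization": "Bearer <TOKEN>"},
		Log:     testutil.Logger{},
	}
	w.SetSerializer(influx.NewSerializer()) // matches data_format = "influx"

	if err := w.Init(); err != nil { // validates the URL scheme
		log.Fatal(err)
	}
	if err := w.Connect(); err != nil { // performs the HTTP Upgrade handshake
		log.Fatal(err)
	}
	defer w.Close()

	// Each Write serializes the whole batch into a single binary (or text) frame.
	if err := w.Write(testutil.MockMetrics()); err != nil {
		log.Fatal(err)
	}
}
```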
diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go
new file mode 100644
index 0000000000000..17aea0542c6aa
--- /dev/null
+++ b/plugins/outputs/websocket/websocket.go
@@ -0,0 +1,225 @@
+package websocket
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/common/proxy"
+	"github.com/influxdata/telegraf/plugins/common/tls"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers"
+
+	ws "github.com/gorilla/websocket"
+)
+
+var sampleConfig = `
+  ## URL is the address to send metrics to. Make sure ws or wss scheme is used.
+  url = "ws://127.0.0.1:8080/telegraf"
+
+  ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
+  # connect_timeout = "30s"
+  # write_timeout = "30s"
+  # read_timeout = "30s"
+
+  ## Optionally turn on using text data frames (binary by default).
+  # use_text_frames = false
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  # data_format = "influx"
+
+  ## Additional HTTP Upgrade headers
+  # [outputs.websocket.headers]
+  #   Authorization = "Bearer <TOKEN>"
+`
+
+const (
+	defaultConnectTimeout = 30 * time.Second
+	defaultWriteTimeout   = 30 * time.Second
+	defaultReadTimeout    = 30 * time.Second
+)
+
+// WebSocket can output to a WebSocket endpoint.
+type WebSocket struct {
+	URL            string            `toml:"url"`
+	ConnectTimeout config.Duration   `toml:"connect_timeout"`
+	WriteTimeout   config.Duration   `toml:"write_timeout"`
+	ReadTimeout    config.Duration   `toml:"read_timeout"`
+	Headers        map[string]string `toml:"headers"`
+	UseTextFrames  bool              `toml:"use_text_frames"`
+	Log            telegraf.Logger   `toml:"-"`
+	proxy.HTTPProxy
+	tls.ClientConfig
+
+	conn       *ws.Conn
+	serializer serializers.Serializer
+}
+
+// SetSerializer implements serializers.SerializerOutput.
+func (w *WebSocket) SetSerializer(serializer serializers.Serializer) {
+	w.serializer = serializer
+}
+
+// Description of plugin.
+func (w *WebSocket) Description() string {
+	return "Generic WebSocket output writer."
+}
+
+// SampleConfig returns plugin config sample.
+func (w *WebSocket) SampleConfig() string {
+	return sampleConfig
+}
+
+var errInvalidURL = errors.New("invalid websocket URL")
+
+// Init the output plugin.
+func (w *WebSocket) Init() error {
+	if parsedURL, err := url.Parse(w.URL); err != nil || (parsedURL.Scheme != "ws" && parsedURL.Scheme != "wss") {
+		return fmt.Errorf("%w: \"%s\"", errInvalidURL, w.URL)
+	}
+	return nil
+}
+
+// Connect to the output endpoint.
+func (w *WebSocket) Connect() error {
+	tlsCfg, err := w.ClientConfig.TLSConfig()
+	if err != nil {
+		return fmt.Errorf("error creating TLS config: %v", err)
+	}
+
+	dialProxy, err := w.HTTPProxy.Proxy()
+	if err != nil {
+		return fmt.Errorf("error creating proxy: %v", err)
+	}
+
+	dialer := &ws.Dialer{
+		Proxy:            dialProxy,
+		HandshakeTimeout: time.Duration(w.ConnectTimeout),
+		TLSClientConfig:  tlsCfg,
+	}
+
+	headers := http.Header{}
+	for k, v := range w.Headers {
+		headers.Set(k, v)
+	}
+
+	conn, resp, err := dialer.Dial(w.URL, headers)
+	if err != nil {
+		return fmt.Errorf("error dialing: %v", err)
+	}
+	_ = resp.Body.Close()
+	if resp.StatusCode != http.StatusSwitchingProtocols {
+		return fmt.Errorf("wrong status code while connecting to server: %d", resp.StatusCode)
+	}
+
+	w.conn = conn
+	go w.read(conn)
+
+	return nil
+}
+
+func (w *WebSocket) read(conn *ws.Conn) {
+	defer func() { _ = conn.Close() }()
+	if w.ReadTimeout > 0 {
+		if err := conn.SetReadDeadline(time.Now().Add(time.Duration(w.ReadTimeout))); err != nil {
+			w.Log.Errorf("error setting read deadline: %v", err)
+			return
+		}
+		conn.SetPingHandler(func(string) error {
+			err := conn.SetReadDeadline(time.Now().Add(time.Duration(w.ReadTimeout)))
+			if err != nil {
+				w.Log.Errorf("error setting read deadline: %v", err)
+				return err
+			}
+			return conn.WriteControl(ws.PongMessage, nil, time.Now().Add(time.Duration(w.WriteTimeout)))
+		})
+	}
+	for {
+		// Need to read from the connection (to properly process pings from the server).
+		_, _, err := conn.ReadMessage()
+		if err != nil {
+			// The websocket connection is not readable after the first error; it goes
+			// into an error state. The deferred close at the top of this goroutine
+			// closes such a connection, and the next Write re-establishes it.
+			if ws.IsUnexpectedCloseError(err, ws.CloseGoingAway, ws.CloseAbnormalClosure) {
+				w.Log.Errorf("error reading websocket connection: %v", err)
+			}
+			return
+		}
+		if w.ReadTimeout > 0 {
+			if err := conn.SetReadDeadline(time.Now().Add(time.Duration(w.ReadTimeout))); err != nil {
+				return
+			}
+		}
+	}
+}
+
+// Write writes the given metrics to the destination. Not thread-safe.
+func (w *WebSocket) Write(metrics []telegraf.Metric) error {
+	if w.conn == nil {
+		// A previous write failed with an error and the ws connection was closed.
+ if err := w.Connect(); err != nil { + return err + } + } + + messageData, err := w.serializer.SerializeBatch(metrics) + if err != nil { + return err + } + + if w.WriteTimeout > 0 { + if err := w.conn.SetWriteDeadline(time.Now().Add(time.Duration(w.WriteTimeout))); err != nil { + return fmt.Errorf("error setting write deadline: %v", err) + } + } + messageType := ws.BinaryMessage + if w.UseTextFrames { + messageType = ws.TextMessage + } + err = w.conn.WriteMessage(messageType, messageData) + if err != nil { + _ = w.conn.Close() + w.conn = nil + return fmt.Errorf("error writing to connection: %v", err) + } + return nil +} + +// Close closes the connection. Noop if already closed. +func (w *WebSocket) Close() error { + if w.conn == nil { + return nil + } + err := w.conn.Close() + w.conn = nil + return err +} + +func newWebSocket() *WebSocket { + return &WebSocket{ + ConnectTimeout: config.Duration(defaultConnectTimeout), + WriteTimeout: config.Duration(defaultWriteTimeout), + ReadTimeout: config.Duration(defaultReadTimeout), + } +} + +func init() { + outputs.Add("websocket", func() telegraf.Output { + return newWebSocket() + }) +} diff --git a/plugins/outputs/websocket/websocket_test.go b/plugins/outputs/websocket/websocket_test.go new file mode 100644 index 0000000000000..a6c74a77dd38a --- /dev/null +++ b/plugins/outputs/websocket/websocket_test.go @@ -0,0 +1,221 @@ +package websocket + +import ( + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + + ws "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" +) + +// testSerializer serializes to a number of metrics to simplify tests here. +type testSerializer struct{} + +func newTestSerializer() *testSerializer { + return &testSerializer{} +} + +func (t testSerializer) Serialize(_ telegraf.Metric) ([]byte, error) { + return []byte("1"), nil +} + +func (t testSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + return []byte(strconv.Itoa(len(metrics))), nil +} + +type testServer struct { + *httptest.Server + t *testing.T + messages chan []byte + upgradeDelay time.Duration + expectTextFrames bool +} + +func newTestServer(t *testing.T, messages chan []byte, tls bool) *testServer { + s := &testServer{} + s.t = t + if tls { + s.Server = httptest.NewTLSServer(s) + } else { + s.Server = httptest.NewServer(s) + } + s.URL = makeWsProto(s.Server.URL) + s.messages = messages + return s +} + +func makeWsProto(s string) string { + return "ws" + strings.TrimPrefix(s, "http") +} + +const ( + testHeaderName = "X-Telegraf-Test" + testHeaderValue = "1" +) + +var testUpgrader = ws.Upgrader{} + +func (s *testServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Header.Get(testHeaderName) != testHeaderValue { + s.t.Fatalf("expected test header found in request, got: %#v", r.Header) + } + if s.upgradeDelay > 0 { + // Emulate long handshake. 
+ select { + case <-r.Context().Done(): + return + case <-time.After(s.upgradeDelay): + } + } + conn, err := testUpgrader.Upgrade(w, r, http.Header{}) + if err != nil { + return + } + defer func() { _ = conn.Close() }() + + for { + messageType, data, err := conn.ReadMessage() + if err != nil { + break + } + if s.expectTextFrames && messageType != ws.TextMessage { + s.t.Fatalf("unexpected frame type: %d", messageType) + } + select { + case s.messages <- data: + case <-time.After(5 * time.Second): + s.t.Fatal("timeout writing to messages channel, make sure there are readers") + } + } +} + +func initWebSocket(s *testServer) *WebSocket { + w := newWebSocket() + w.Log = testutil.Logger{} + w.URL = s.URL + w.Headers = map[string]string{testHeaderName: testHeaderValue} + w.SetSerializer(newTestSerializer()) + return w +} + +func connect(t *testing.T, w *WebSocket) { + err := w.Connect() + require.NoError(t, err) +} + +func TestWebSocket_NoURL(t *testing.T) { + w := newWebSocket() + err := w.Init() + require.ErrorIs(t, err, errInvalidURL) +} + +func TestWebSocket_Connect_Timeout(t *testing.T) { + s := newTestServer(t, nil, false) + s.upgradeDelay = time.Second + defer s.Close() + w := initWebSocket(s) + w.ConnectTimeout = config.Duration(10 * time.Millisecond) + err := w.Connect() + require.Error(t, err) +} + +func TestWebSocket_Connect_OK(t *testing.T) { + s := newTestServer(t, nil, false) + defer s.Close() + w := initWebSocket(s) + connect(t, w) +} + +func TestWebSocket_ConnectTLS_OK(t *testing.T) { + s := newTestServer(t, nil, true) + defer s.Close() + w := initWebSocket(s) + w.ClientConfig.InsecureSkipVerify = true + connect(t, w) +} + +func TestWebSocket_Write_OK(t *testing.T) { + messages := make(chan []byte, 1) + + s := newTestServer(t, messages, false) + defer s.Close() + + w := initWebSocket(s) + connect(t, w) + + var metrics []telegraf.Metric + metrics = append(metrics, testutil.TestMetric(0.4, "test")) + metrics = append(metrics, testutil.TestMetric(0.5, "test")) + err := w.Write(metrics) + require.NoError(t, err) + + select { + case data := <-messages: + require.Equal(t, []byte("2"), data) + case <-time.After(time.Second): + t.Fatal("timeout receiving data") + } +} + +func TestWebSocket_Write_Error(t *testing.T) { + s := newTestServer(t, nil, false) + defer s.Close() + + w := initWebSocket(s) + connect(t, w) + + require.NoError(t, w.conn.Close()) + + metrics := []telegraf.Metric{testutil.TestMetric(0.4, "test")} + err := w.Write(metrics) + require.Error(t, err) + require.Nil(t, w.conn) +} + +func TestWebSocket_Write_Reconnect(t *testing.T) { + messages := make(chan []byte, 1) + s := newTestServer(t, messages, false) + s.expectTextFrames = true // Also use text frames in this test. + defer s.Close() + + w := initWebSocket(s) + w.UseTextFrames = true + connect(t, w) + + metrics := []telegraf.Metric{testutil.TestMetric(0.4, "test")} + + require.NoError(t, w.conn.Close()) + + err := w.Write(metrics) + require.Error(t, err) + require.Nil(t, w.conn) + + err = w.Write(metrics) + require.NoError(t, err) + + select { + case data := <-messages: + require.Equal(t, []byte("1"), data) + case <-time.After(time.Second): + t.Fatal("timeout receiving data") + } +} + +func TestWebSocket_Close(t *testing.T) { + s := newTestServer(t, nil, false) + defer s.Close() + + w := initWebSocket(s) + connect(t, w) + require.NoError(t, w.Close()) + // Check no error on second close. 
+	require.NoError(t, w.Close())
+}
diff --git a/plugins/outputs/yandex_cloud_monitoring/README.md b/plugins/outputs/yandex_cloud_monitoring/README.md
new file mode 100644
index 0000000000000..3bace22b4adb2
--- /dev/null
+++ b/plugins/outputs/yandex_cloud_monitoring/README.md
@@ -0,0 +1,26 @@
+# Yandex Cloud Monitoring
+
+This plugin will send custom metrics to Yandex Cloud Monitoring.
+https://cloud.yandex.com/services/monitoring
+
+### Configuration:
+
+```toml
+[[outputs.yandex_cloud_monitoring]]
+  ## Timeout for HTTP writes.
+  # timeout = "20s"
+
+  ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+  # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+
+  ## All user metrics should be sent with "custom" service specified. Normally should not be changed
+  # service = "custom"
+```
+
+### Authentication
+
+This plugin currently supports only YC.Compute metadata-based authentication.
+
+When the plugin runs inside a YC.Compute instance, it obtains the IAM token and folder ID from the instance metadata.
+
+Other authentication methods will be added later.
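The metadata exchange is plain HTTP: a GET against the instance-local address with a `Metadata-Flavor: Google` header, returning a JSON token document. A sketch of the request the plugin makes (the URL, header, and JSON field names are taken from the plugin source below; the standalone wrapping is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Same endpoint and header as the plugin's getIAMTokenFromMetadata.
	req, err := http.NewRequest("GET",
		"http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Metadata-Flavor", "Google")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // only reachable from inside a YC.Compute instance
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	var token struct {
		AccessToken string `json:"access_token"`
		ExpiresIn   int64  `json:"expires_in"`
	}
	if err := json.Unmarshal(body, &token); err != nil {
		panic(err)
	}
	fmt.Printf("IAM token expires in %d seconds\n", token.ExpiresIn)
}
```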
diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go
new file mode 100644
index 0000000000000..dc097da45ac2a
--- /dev/null
+++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go
@@ -0,0 +1,259 @@
+package yandex_cloud_monitoring
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/selfstat"
+)
+
+// YandexCloudMonitoring allows publishing of metrics to the Yandex Cloud Monitoring custom metrics
+// service
+type YandexCloudMonitoring struct {
+	Timeout     config.Duration `toml:"timeout"`
+	EndpointURL string          `toml:"endpoint_url"`
+	Service     string          `toml:"service"`
+
+	Log telegraf.Logger
+
+	MetadataTokenURL       string
+	MetadataFolderURL      string
+	FolderID               string
+	IAMToken               string
+	IamTokenExpirationTime time.Time
+
+	client *http.Client
+
+	timeFunc func() time.Time
+
+	MetricOutsideWindow selfstat.Stat
+}
+
+type yandexCloudMonitoringMessage struct {
+	TS      string                        `json:"ts,omitempty"`
+	Labels  map[string]string             `json:"labels,omitempty"`
+	Metrics []yandexCloudMonitoringMetric `json:"metrics"`
+}
+
+type yandexCloudMonitoringMetric struct {
+	Name       string            `json:"name"`
+	Labels     map[string]string `json:"labels"`
+	MetricType string            `json:"type,omitempty"` // DGAUGE|IGAUGE|COUNTER|RATE. Default: DGAUGE
+	TS         string            `json:"ts,omitempty"`
+	Value      float64           `json:"value"`
+}
+
+type MetadataIamToken struct {
+	AccessToken string `json:"access_token"`
+	ExpiresIn   int64  `json:"expires_in"`
+	TokenType   string `json:"token_type"`
+}
+
+const (
+	defaultRequestTimeout    = time.Second * 20
+	defaultEndpointURL       = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+	defaultMetadataTokenURL  = "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token"
+	defaultMetadataFolderURL = "http://169.254.169.254/computeMetadata/v1/yandex/folder-id"
+)
+
+var sampleConfig = `
+  ## Timeout for HTTP writes.
+  # timeout = "20s"
+
+  ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+  # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+
+  ## All user metrics should be sent with "custom" service specified. Normally should not be changed
+  # service = "custom"
+`
+
+// Description provides a description of the plugin
+func (a *YandexCloudMonitoring) Description() string {
+	return "Send aggregated metrics to Yandex.Cloud Monitoring"
+}
+
+// SampleConfig provides a sample configuration for the plugin
+func (a *YandexCloudMonitoring) SampleConfig() string {
+	return sampleConfig
+}
+
+// Connect initializes the plugin and validates connectivity
+func (a *YandexCloudMonitoring) Connect() error {
+	if a.Timeout <= 0 {
+		a.Timeout = config.Duration(defaultRequestTimeout)
+	}
+	if a.EndpointURL == "" {
+		a.EndpointURL = defaultEndpointURL
+	}
+	if a.Service == "" {
+		a.Service = "custom"
+	}
+	if a.MetadataTokenURL == "" {
+		a.MetadataTokenURL = defaultMetadataTokenURL
+	}
+	if a.MetadataFolderURL == "" {
+		a.MetadataFolderURL = defaultMetadataFolderURL
+	}
+
+	a.client = &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+		},
+		Timeout: time.Duration(a.Timeout),
+	}
+
+	var err error
+	a.FolderID, err = a.getFolderIDFromMetadata()
+	if err != nil {
+		return err
+	}
+
+	a.Log.Infof("Writing to Yandex.Cloud Monitoring URL: %s", a.EndpointURL)
+
+	tags := map[string]string{}
+	a.MetricOutsideWindow = selfstat.Register("yandex_cloud_monitoring", "metric_outside_window", tags)
+
+	return nil
+}
+
+// Close shuts down any active connections
+func (a *YandexCloudMonitoring) Close() error {
+	a.client = nil
+	return nil
+}
+
+// Write writes metrics to the remote endpoint
+func (a *YandexCloudMonitoring) Write(metrics []telegraf.Metric) error {
+	var yandexCloudMonitoringMetrics []yandexCloudMonitoringMetric
+	for _, m := range metrics {
+		for _, field := range m.FieldList() {
+			yandexCloudMonitoringMetrics = append(
+				yandexCloudMonitoringMetrics,
+				yandexCloudMonitoringMetric{
+					Name:   field.Key,
+					Labels: m.Tags(),
+					TS:     fmt.Sprint(m.Time().Format(time.RFC3339)),
+					Value:  field.Value.(float64),
+				},
+			)
+		}
+	}
+
+	var body []byte
+	jsonBytes, err := json.Marshal(
+		yandexCloudMonitoringMessage{
+			Metrics: yandexCloudMonitoringMetrics,
+		},
+	)
+
+	if err != nil {
+		return err
+	}
+	body = append(jsonBytes, '\n')
+	return a.send(body)
+}
+
+func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) {
+	req, err := http.NewRequest("GET", metadataURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("error creating request: %v", err)
+	}
+	req.Header.Set("Metadata-Flavor", "Google")
+	resp, err := c.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode >= 300 || resp.StatusCode < 200 {
+		return nil, fmt.Errorf("unable to fetch instance metadata: [%s] %d",
+			metadataURL, resp.StatusCode)
+	}
+	return body, nil
+}
+
+func (a *YandexCloudMonitoring) getFolderIDFromMetadata() (string, error) {
+	a.Log.Infof("getting folder ID from %s", a.MetadataFolderURL)
+	body, err := getResponseFromMetadata(a.client, a.MetadataFolderURL)
+	if err != nil {
+		return "", err
+	}
+	folderID := string(body)
+	if folderID == "" {
+		return "", fmt.Errorf("unable to fetch folder id from URL %s: %v", a.MetadataFolderURL, err)
+	}
+	return folderID, nil
+}
+
+func (a *YandexCloudMonitoring) getIAMTokenFromMetadata() (string, int, error) {
+	a.Log.Debugf("getting new IAM token from %s", a.MetadataTokenURL)
+	body, err := getResponseFromMetadata(a.client, a.MetadataTokenURL)
+	if err != nil {
+		return "", 0, err
+	}
+	var metadata MetadataIamToken
+	if err := 
json.Unmarshal(body, &metadata); err != nil { + return "", 0, err + } + if metadata.AccessToken == "" || metadata.ExpiresIn == 0 { + return "", 0, fmt.Errorf("unable to fetch authentication credentials %s: %v", a.MetadataTokenURL, err) + } + return metadata.AccessToken, int(metadata.ExpiresIn), nil +} + +func (a *YandexCloudMonitoring) send(body []byte) error { + req, err := http.NewRequest("POST", a.EndpointURL, bytes.NewBuffer(body)) + if err != nil { + return err + } + q := req.URL.Query() + q.Add("folderId", a.FolderID) + q.Add("service", a.Service) + req.URL.RawQuery = q.Encode() + + req.Header.Set("Content-Type", "application/json") + isTokenExpired := !a.IamTokenExpirationTime.After(time.Now()) + if a.IAMToken == "" || isTokenExpired { + token, expiresIn, err := a.getIAMTokenFromMetadata() + if err != nil { + return err + } + a.IamTokenExpirationTime = time.Now().Add(time.Duration(expiresIn) * time.Second) + a.IAMToken = token + } + req.Header.Set("Authorization", "Bearer "+a.IAMToken) + + a.Log.Debugf("sending metrics to %s", req.URL.String()) + a.Log.Debugf("body: %s", body) + resp, err := a.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = io.ReadAll(resp.Body) + if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) + } + + return nil +} + +func init() { + outputs.Add("yandex_cloud_monitoring", func() telegraf.Output { + return &YandexCloudMonitoring{ + timeFunc: time.Now, + } + }) +} diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go new file mode 100644 index 0000000000000..a3a7ea04d60d4 --- /dev/null +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go @@ -0,0 +1,95 @@ +package yandex_cloud_monitoring + +import ( + "encoding/json" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +func TestWrite(t *testing.T) { + readBody := func(r *http.Request) yandexCloudMonitoringMessage { + decoder := json.NewDecoder(r.Body) + var message yandexCloudMonitoringMessage + err := decoder.Decode(&message) + require.NoError(t, err) + return message + } + + testMetadataHTTPServer := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/token") { + token := MetadataIamToken{ + AccessToken: "token1", + ExpiresIn: 123, + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + err := json.NewEncoder(w).Encode(token) + require.NoError(t, err) + } else if strings.HasSuffix(r.URL.Path, "/folder") { + _, err := io.WriteString(w, "folder1") + require.NoError(t, err) + } + w.WriteHeader(http.StatusOK) + }), + ) + defer testMetadataHTTPServer.Close() + metadataTokenURL := "http://" + testMetadataHTTPServer.Listener.Addr().String() + "/token" + metadataFolderURL := "http://" + testMetadataHTTPServer.Listener.Addr().String() + "/folder" + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + url := "http://" + ts.Listener.Addr().String() + "/metrics" + + tests := []struct { + name string + plugin *YandexCloudMonitoring + metrics []telegraf.Metric + handler func(t *testing.T, w http.ResponseWriter, r *http.Request) + }{ + { + name: "metric is converted to json value", + plugin: 
&YandexCloudMonitoring{}, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cluster", + map[string]string{}, + map[string]interface{}{ + "cpu": 42.0, + }, + time.Unix(0, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + message := readBody(r) + require.Len(t, message.Metrics, 1) + require.Equal(t, "cpu", message.Metrics[0].Name) + require.Equal(t, 42.0, message.Metrics[0].Value) + w.WriteHeader(http.StatusOK) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tt.handler(t, w, r) + }) + tt.plugin.Log = testutil.Logger{} + tt.plugin.EndpointURL = url + tt.plugin.MetadataTokenURL = metadataTokenURL + tt.plugin.MetadataFolderURL = metadataFolderURL + err := tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write(tt.metrics) + + require.NoError(t, err) + }) + } +} diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md index cc7daa4f6af42..8dbc052be145d 100644 --- a/plugins/parsers/collectd/README.md +++ b/plugins/parsers/collectd/README.md @@ -39,7 +39,7 @@ You can also change the path to the typesdb or add additional typesdb using ## Multi-value plugins can be handled two ways. ## "split" will parse and store the multi-value plugin data into separate measurements ## "join" will parse and store the multi-value plugin as a single multi-value measurement. - ## "split" is the default behavior for backward compatability with previous versions of influxdb. + ## "split" is the default behavior for backward compatibility with previous versions of influxdb. collectd_parse_multivalue = "split" ``` diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go index 6b7fbd7566d12..f0f9773472c4f 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -76,7 +76,7 @@ func NewCollectdParser( func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) { valueLists, err := network.Parse(buf, p.popts) if err != nil { - return nil, fmt.Errorf("Collectd parser error: %s", err) + return nil, fmt.Errorf("collectd parser error: %s", err) } metrics := []telegraf.Metric{} @@ -105,7 +105,7 @@ func (p *CollectdParser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) != 1 { - return nil, errors.New("Line contains multiple metrics") + return nil, errors.New("line contains multiple metrics") } return metrics[0], nil @@ -128,8 +128,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric switch multiValue { case "split": for i := range vl.Values { - var name string - name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) tags := make(map[string]string) fields := make(map[string]interface{}) @@ -157,11 +156,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric } // Drop invalid points - m, err := metric.New(name, tags, fields, timestamp) - if err != nil { - log.Printf("E! Dropping metric %v: %v", name, err) - continue - } + m := metric.New(name, tags, fields, timestamp) metrics = append(metrics, m) } @@ -193,10 +188,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric } } - m, err := metric.New(name, tags, fields, timestamp) - if err != nil { - log.Printf("E! 
Dropping metric %v: %v", name, err)
-	}
+	m := metric.New(name, tags, fields, timestamp)
 	metrics = append(metrics, m)
 
 	default:
diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md
index b44d2fc2d2576..192c9216b3a82 100644
--- a/plugins/parsers/csv/README.md
+++ b/plugins/parsers/csv/README.md
@@ -3,7 +3,7 @@
 The `csv` parser creates metrics from a document containing comma separated
 values.
 
-### Configuration
+## Configuration
 
 ```toml
 [[inputs.file]]
@@ -73,8 +73,13 @@ values.
   ## in case of there is no timezone information.
   ## It follows the IANA Time Zone database.
   csv_timezone = ""
+
+  ## Indicates values to skip, such as an empty string value "".
+  ## The field will be skipped entirely where it matches any values inserted here.
+  csv_skip_values = []
 ```
-#### csv_timestamp_column, csv_timestamp_format
+
+### csv_timestamp_column, csv_timestamp_format
 
 By default the current time will be used for all created metrics. To set the
 time using the CSV document you can use the `csv_timestamp_column` and
@@ -100,6 +105,7 @@ columns and rows.
 ### Examples
 
 Config:
+
 ```toml
 [[inputs.file]]
   files = ["example"]
@@ -110,13 +116,15 @@ Config:
 ```
 
 Input:
-```
+
+```shell
 measurement,cpu,time_user,time_system,time_idle,time
 cpu,cpu0,42,42,42,2018-09-13T13:03:28Z
 ```
 
 Output:
-```
+
+```shell
 cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000
 ```
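The new `csv_skip_values` option corresponds to `Config.SkipValues` in the parser below; a minimal sketch mirroring the tests added in this change (the standalone wrapping is illustrative):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/csv"
)

func main() {
	// Skip empty-string values, i.e. csv_skip_values = [""].
	p, err := csv.NewParser(&csv.Config{
		MetricName:     "csv",
		HeaderRowCount: 1,
		ColumnNames:    []string{"a", "b"},
		SkipValues:     []string{""},
	})
	if err != nil {
		panic(err)
	}

	metrics, err := p.Parse([]byte(`a,b
1,""`))
	if err != nil {
		panic(err)
	}

	// Field "b" is skipped entirely; only a=1 remains.
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Fields())
	}
}
```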
diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go
index 76d8306ea6e46..8f4969efb70bd 100644
--- a/plugins/parsers/csv/parser.go
+++ b/plugins/parsers/csv/parser.go
@@ -31,6 +31,9 @@ type Config struct {
 	TimestampFormat   string   `toml:"csv_timestamp_format"`
 	Timezone          string   `toml:"csv_timezone"`
 	TrimSpace         bool     `toml:"csv_trim_space"`
+	SkipValues        []string `toml:"csv_skip_values"`
+
+	gotColumnNames bool
 
 	TimeFunc    func() time.Time
 	DefaultTags map[string]string
@@ -64,6 +67,8 @@ func NewParser(c *Config) (*Parser, error) {
 		return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types")
 	}
 
+	c.gotColumnNames = len(c.ColumnNames) > 0
+
 	if c.TimeFunc == nil {
 		c.TimeFunc = time.Now
 	}
@@ -75,7 +80,7 @@ func (p *Parser) SetTimeFunc(fn TimeFunc) {
 	p.TimeFunc = fn
 }
 
-func (p *Parser) compile(r io.Reader) (*csv.Reader, error) {
+func (p *Parser) compile(r io.Reader) *csv.Reader {
 	csvReader := csv.NewReader(r)
 	// ensures that the reader reads records of different lengths without an error
 	csvReader.FieldsPerRecord = -1
@@ -86,15 +91,12 @@ func (p *Parser) compile(r io.Reader) (*csv.Reader, error) {
 		csvReader.Comment = []rune(p.Comment)[0]
 	}
 	csvReader.TrimLeadingSpace = p.TrimSpace
-	return csvReader, nil
+	return csvReader
 }
 
 func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	r := bytes.NewReader(buf)
-	csvReader, err := p.compile(r)
-	if err != nil {
-		return nil, err
-	}
+	csvReader := p.compile(r)
 	// skip first rows
 	for i := 0; i < p.SkipRows; i++ {
 		_, err := csvReader.Read()
@@ -102,10 +104,13 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 			return nil, err
 		}
 	}
-	// if there is a header and nothing in DataColumns
+	// if there is a header and we did not get DataColumns,
 	// set DataColumns to names extracted from the header
-	headerNames := make([]string, 0)
-	if len(p.ColumnNames) == 0 {
+	// we always re-read the header to avoid side effects
+	// in cases where multiple files with different
+	// headers are read
+	if !p.gotColumnNames {
+		headerNames := make([]string, 0)
 		for i := 0; i < p.HeaderRowCount; i++ {
 			header, err := csvReader.Read()
 			if err != nil {
@@ -155,11 +160,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 // it will also not skip any rows
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 	r := bytes.NewReader([]byte(line))
-	csvReader, err := p.compile(r)
-	if err != nil {
-		return nil, err
-	}
-
+	csvReader := p.compile(r)
 	// if there is nothing in DataColumns, ParseLine will fail
 	if len(p.ColumnNames) == 0 {
 		return nil, fmt.Errorf("[parsers.csv] data columns must be specified")
@@ -190,6 +191,13 @@ outer:
 			value = strings.Trim(value, " ")
 		}
 
+		// don't record fields where the value matches a skip value
+		for _, s := range p.SkipValues {
+			if value == s {
+				continue outer
+			}
+		}
+
 		for _, tagName := range p.TagColumns {
 			if tagName == fieldName {
 				tags[tagName] = value
@@ -197,6 +205,12 @@ outer:
 			}
 		}
 
+		// If the field name is the timestamp column, then keep field name as is.
+		if fieldName == p.TimestampColumn {
+			recordFields[fieldName] = value
+			continue
+		}
+
 		// Try explicit conversion only when column types is defined.
 		if len(p.ColumnTypes) > 0 {
 			// Throw error if current column count exceeds defined types.
@@ -266,10 +280,8 @@ outer:
 	delete(recordFields, p.TimestampColumn)
 	delete(recordFields, p.MeasurementColumn)
 
-	m, err := metric.New(measurementName, tags, recordFields, metricTime)
-	if err != nil {
-		return nil, err
-	}
+	m := metric.New(measurementName, tags, recordFields, metricTime)
+
 	return m, nil
 }
diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go
index c0f489365eb75..8e4a5181c7969 100644
--- a/plugins/parsers/csv/parser_test.go
+++ b/plugins/parsers/csv/parser_test.go
@@ -85,6 +85,26 @@ func TestTimestamp(t *testing.T) {
 	require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000))
 }
 
+func TestTimestampYYYYMMDDHHmm(t *testing.T) {
+	p, err := NewParser(
+		&Config{
+			HeaderRowCount:    1,
+			ColumnNames:       []string{"first", "second", "third"},
+			MeasurementColumn: "third",
+			TimestampColumn:   "first",
+			TimestampFormat:   "200601021504",
+			TimeFunc:          DefaultTime,
+		},
+	)
+	testCSV := `line1,line2,line3
+200905231605,70,test_name
+200907111605,80,test_name2`
+	metrics, err := p.Parse([]byte(testCSV))
+
+	require.NoError(t, err)
+	require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094700000000000))
+	require.Equal(t, metrics[1].Time().UnixNano(), int64(1247328300000000000))
+}
 func TestTimestampError(t *testing.T) {
 	p, err := NewParser(
 		&Config{
@@ -206,10 +226,8 @@ func TestValueConversion(t *testing.T) {
 	metrics, err := p.Parse([]byte(testCSV))
 	require.NoError(t, err)
 
-	expectedMetric, err1 := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0))
-	returnedMetric, err2 := metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0))
-	require.NoError(t, err1)
-	require.NoError(t, err2)
+	expectedMetric := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0))
+	returnedMetric := metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0))
 
 	//deep equal fields
 	require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields())
@@ -220,8 +238,7 @@ func TestValueConversion(t *testing.T) {
 	metrics, err = p.Parse([]byte(testCSV))
 	require.NoError(t, err)
 
-	returnedMetric, err2 = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0))
-	require.NoError(t, err2)
+	returnedMetric = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0))
 
 	//deep equal fields
 	require.Equal(t, 
expectedMetric.Fields(), returnedMetric.Fields()) @@ -593,3 +610,57 @@ func TestStaticMeasurementName(t *testing.T) { } testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) } + +func TestSkipEmptyStringValue(t *testing.T) { + p, err := NewParser( + &Config{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + SkipValues: []string{""}, + }, + ) + require.NoError(t, err) + testCSV := `a,b +1,""` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric("csv", + map[string]string{}, + map[string]interface{}{ + "a": 1, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) +} + +func TestSkipSpecifiedStringValue(t *testing.T) { + p, err := NewParser( + &Config{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + SkipValues: []string{"MM"}, + }, + ) + require.NoError(t, err) + testCSV := `a,b +1,MM` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric("csv", + map[string]string{}, + map[string]interface{}{ + "a": 1, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) +} diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index d8dcc92040aa4..2115bd8a07e78 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "log" - "strings" "time" "github.com/influxdata/telegraf" @@ -14,16 +13,12 @@ import ( "github.com/tidwall/gjson" ) -var fieldEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") -var keyEscaper = strings.NewReplacer(" ", "\\ ", ",", "\\,", "=", "\\=") - type TimeFunc func() time.Time // Parser parses json inputs containing dropwizard metrics, // either top-level or embedded inside a json field. // This parser is using gjson for retrieving paths within the json file. type parser struct { - // an optional json path containing the metric registry object // if left empty, the whole json object is parsed as a metric registry MetricRegistryPath string @@ -69,7 +64,6 @@ func NewParser() *parser { // Parse parses the input bytes to an array of metrics func (p *parser) Parse(buf []byte) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) metricTime, err := p.parseTime(buf) @@ -147,7 +141,6 @@ func (p *parser) SetDefaultTags(tags map[string]string) { } func (p *parser) readTags(buf []byte) map[string]string { - if p.TagsPath != "" { var tagsBytes []byte tagsResult := gjson.GetBytes(buf, p.TagsPath) @@ -173,7 +166,6 @@ func (p *parser) readTags(buf []byte) map[string]string { } func (p *parser) parseTime(buf []byte) (time.Time, error) { - if p.TimePath != "" { timeFormat := p.TimeFormat if timeFormat == "" { @@ -195,7 +187,6 @@ func (p *parser) parseTime(buf []byte) (time.Time, error) { } func (p *parser) unmarshalMetrics(buf []byte) (map[string]interface{}, error) { - var registryBytes []byte if p.MetricRegistryPath != "" { regResult := gjson.GetBytes(buf, p.MetricRegistryPath) @@ -236,11 +227,7 @@ func (p *parser) readDWMetrics(metricType string, dwms interface{}, metrics []te parsed, err := p.seriesParser.Parse([]byte(measurementName)) var m telegraf.Metric if err != nil || len(parsed) != 1 { - m, err = metric.New(measurementName, map[string]string{}, map[string]interface{}{}, tm) - if err != nil { - log.Printf("W! 
failed to create metric of type '%s': %s\n", metricType, err) - continue - } + m = metric.New(measurementName, map[string]string{}, map[string]interface{}{}, tm) } else { m = parsed[0] m.SetTime(tm) diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index df75c7f252969..b867670c9400e 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -497,13 +497,6 @@ func containsAll(t1 map[string]string, t2 map[string]string) bool { return true } -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func NoError(t *testing.T, err error) { require.NoError(t, err) } @@ -519,17 +512,15 @@ func TestDropWizard(t *testing.T) { name: "minimal", input: []byte(`{"version": "3.0.0", "counters": {"cpu": {"value": 42}}}`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "metric_type": "counter", - }, - map[string]interface{}{ - "value": 42.0, - }, - testTimeFunc(), - ), + metric.New( + "cpu", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42.0, + }, + testTimeFunc(), ), }, errFunc: NoError, @@ -538,17 +529,15 @@ func TestDropWizard(t *testing.T) { name: "name with space unescaped", input: []byte(`{"version": "3.0.0", "counters": {"hello world": {"value": 42}}}`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "hello world", - map[string]string{ - "metric_type": "counter", - }, - map[string]interface{}{ - "value": 42.0, - }, - testTimeFunc(), - ), + metric.New( + "hello world", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42.0, + }, + testTimeFunc(), ), }, errFunc: NoError, @@ -564,17 +553,15 @@ func TestDropWizard(t *testing.T) { name: "name with space double slash escape", input: []byte(`{"version": "3.0.0", "counters": {"hello\\ world": {"value": 42}}}`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "hello world", - map[string]string{ - "metric_type": "counter", - }, - map[string]interface{}{ - "value": 42.0, - }, - testTimeFunc(), - ), + metric.New( + "hello world", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42.0, + }, + testTimeFunc(), ), }, errFunc: NoError, diff --git a/plugins/parsers/form_urlencoded/parser.go b/plugins/parsers/form_urlencoded/parser.go index f38d87a80eac0..f26740709251a 100644 --- a/plugins/parsers/form_urlencoded/parser.go +++ b/plugins/parsers/form_urlencoded/parser.go @@ -47,12 +47,9 @@ func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { tags[key] = value } - metric, err := metric.New(p.MetricName, tags, fields, time.Now().UTC()) - if err != nil { - return nil, err - } + m := metric.New(p.MetricName, tags, fields, time.Now().UTC()) - return []telegraf.Metric{metric}, nil + return []telegraf.Metric{m}, nil } // ParseLine delegates a single line of text to the Parse function diff --git a/plugins/parsers/graphite/README.md b/plugins/parsers/graphite/README.md index b0b1127aa4ce0..63d7c936ae819 100644 --- a/plugins/parsers/graphite/README.md +++ b/plugins/parsers/graphite/README.md @@ -1,7 +1,7 @@ # Graphite The Graphite data format translates graphite *dot* buckets directly into -telegraf measurement names, with a single value field, and without any tags. +telegraf measurement names, with a single value field, and optional tags. By default, the separator is left as `.`, but this can be changed using the `separator` argument. 
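With the tag support added in parser.go below, a Graphite 1.1 style line carrying `;key=value` pairs parses into metric tags. A sketch mirroring the new parser tests (the template string and sample line are taken from those tests; the constructor signature is assumed from the parser package):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/graphite"
)

func main() {
	// Separator ".", one template, no default tags.
	p, err := graphite.NewGraphiteParser(".", []string{"measurement.foo.bar"}, nil)
	if err != nil {
		panic(err)
	}

	// Everything after ';' is parsed as key=value tag pairs.
	m, err := p.ParseLine("cpu.foo.bar;tag1=value1 50 1435077219")
	if err != nil {
		panic(err)
	}

	// cpu map[bar:bar foo:foo tag1:value1] map[value:50]
	fmt.Println(m.Name(), m.Tags(), m.Fields())
}
```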
For more advanced options, Telegraf supports specifying [templates](#templates) to translate graphite buckets into Telegraf metrics. diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go index 915077c06b299..43c7058693b33 100644 --- a/plugins/parsers/graphite/config.go +++ b/plugins/parsers/graphite/config.go @@ -19,11 +19,7 @@ type Config struct { // Validate validates the config's templates and tags. func (c *Config) Validate() error { - if err := c.validateTemplates(); err != nil { - return err - } - - return nil + return c.validateTemplates() } func (c *Config) validateTemplates() error { diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index f50217711c15c..dac4f55f83f25 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -103,15 +103,17 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { return nil, fmt.Errorf("received %q which doesn't have required fields", line) } + parts := strings.Split(fields[0], ";") + // decode the name and tags - measurement, tags, field, err := p.templateEngine.Apply(fields[0]) + measurement, tags, field, err := p.templateEngine.Apply(parts[0]) if err != nil { return nil, err } // Could not extract measurement, use the raw value if measurement == "" { - measurement = fields[0] + measurement = parts[0] } // Parse value. @@ -147,6 +149,24 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { } } } + + // Split name and tags + if len(parts) >= 2 { + for _, tag := range parts[1:] { + tagValue := strings.Split(tag, "=") + if len(tagValue) != 2 || len(tagValue[0]) == 0 || len(tagValue[1]) == 0 { + continue + } + if strings.ContainsAny(tagValue[0], "!^") { + continue + } + if strings.Index(tagValue[1], "~") == 0 { + continue + } + tags[tagValue[0]] = tagValue[1] + } + } + // Set the default tags on the point if they are not already set for k, v := range p.DefaultTags { if _, ok := tags[k]; !ok { @@ -154,7 +174,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { } } - return metric.New(measurement, tags, fieldValues, timestamp) + return metric.New(measurement, tags, fieldValues, timestamp), nil } // ApplyTemplate extracts the template fields from the given line and diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index 9254574b604e6..991cce661762c 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -178,6 +178,67 @@ func TestParseLine(t *testing.T) { value: 50, time: testTime, }, + { + test: "normal case with tag", + input: `cpu.foo.bar;tag1=value1 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + "tag1": "value1", + }, + value: 50, + time: testTime, + }, + { + test: "wrong tag names", + input: `cpu.foo.bar;tag!1=value1;tag^2=value2 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, + { + test: "empty tag name", + input: `cpu.foo.bar;=value1 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, + { + test: "wrong tag value", + input: `cpu.foo.bar;tag1=~value1 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": 
"foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, + { + test: "empty tag value", + input: `cpu.foo.bar;tag1= 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, { test: "metric only with float value", input: `cpu 50.554 ` + strTime, @@ -279,6 +340,20 @@ func TestParse(t *testing.T) { value: 50, time: testTime, }, + { + test: "normal case with tag", + input: []byte(`cpu.foo.bar;tag1=value1 50 ` + strTime), + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + "tag1": "value1", + }, + value: 50, + time: testTime, + }, + { test: "metric only with float value", input: []byte(`cpu 50.554 ` + strTime), @@ -397,11 +472,10 @@ func TestFilterMatchDefault(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("miss.servers.localhost.cpu_load", + exp := metric.New("miss.servers.localhost.cpu_load", map[string]string{}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("miss.servers.localhost.cpu_load 11 1435077219") assert.NoError(t, err) @@ -415,11 +489,10 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu.cpu_load.10", + exp := metric.New("cpu.cpu_load.10", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219") assert.NoError(t, err) @@ -434,11 +507,10 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) { ) assert.NoError(t, err) - exp, err := metric.New("cpu_cpu_load_10", + exp := metric.New("cpu_cpu_load_10", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219") assert.NoError(t, err) @@ -452,7 +524,7 @@ func TestFilterMatchSingle(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) @@ -469,11 +541,10 @@ func TestParseNoMatch(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("servers.localhost.memory.VmallocChunk", + exp := metric.New("servers.localhost.memory.VmallocChunk", map[string]string{}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.memory.VmallocChunk 11 1435077219") assert.NoError(t, err) @@ -487,11 +558,10 @@ func TestFilterMatchWildcard(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") assert.NoError(t, err) @@ -507,11 +577,10 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", 
map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") assert.NoError(t, err) @@ -556,11 +625,10 @@ func TestFilterMatchMultipleWildcards(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "server01"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.server01.cpu_load 11 1435077219") assert.NoError(t, err) diff --git a/plugins/parsers/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go index 282c28111b14c..428d129fc2394 100644 --- a/plugins/parsers/grok/influx_patterns.go +++ b/plugins/parsers/grok/influx_patterns.go @@ -1,6 +1,6 @@ package grok -const DEFAULT_PATTERNS = ` +const DefaultPatterns = ` # Example log file pattern, example log looks like this: # [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs # Breakdown of the DURATION pattern below: diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 810190b9d2f12..57e6269994ed2 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -38,18 +38,18 @@ var timeLayouts = map[string]string{ } const ( - MEASUREMENT = "measurement" - INT = "int" - TAG = "tag" - FLOAT = "float" - STRING = "string" - DURATION = "duration" - DROP = "drop" - EPOCH = "EPOCH" - EPOCH_MILLI = "EPOCH_MILLI" - EPOCH_NANO = "EPOCH_NANO" - SYSLOG_TIMESTAMP = "SYSLOG_TIMESTAMP" - GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP" + Measurement = "measurement" + Int = "int" + Tag = "tag" + Float = "float" + String = "string" + Duration = "duration" + Drop = "drop" + Epoch = "EPOCH" + EpochMilli = "EPOCH_MILLI" + EpochNano = "EPOCH_NANO" + SyslogTimestamp = "SYSLOG_TIMESTAMP" + GenericTimestamp = "GENERIC_TIMESTAMP" ) var ( @@ -161,7 +161,7 @@ func (p *Parser) Compile() error { // Combine user-supplied CustomPatterns with DEFAULT_PATTERNS and parse // them together as the same type of pattern. - p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns + p.CustomPatterns = DefaultPatterns + p.CustomPatterns if len(p.CustomPatterns) != 0 { scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns)) p.addCustomPatterns(scanner) @@ -243,38 +243,38 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } // if we didn't find a type OR timestamp modifier, assume string if t == "" { - t = STRING + t = String } switch t { - case MEASUREMENT: + case Measurement: p.Measurement = v - case INT: + case Int: iv, err := strconv.ParseInt(v, 0, 64) if err != nil { log.Printf("E! Error parsing %s to int: %s", v, err) } else { fields[k] = iv } - case FLOAT: + case Float: fv, err := strconv.ParseFloat(v, 64) if err != nil { log.Printf("E! Error parsing %s to float: %s", v, err) } else { fields[k] = fv } - case DURATION: + case Duration: d, err := time.ParseDuration(v) if err != nil { log.Printf("E! Error parsing %s to duration: %s", v, err) } else { fields[k] = int64(d) } - case TAG: + case Tag: tags[k] = v - case STRING: + case String: fields[k] = v - case EPOCH: + case Epoch: parts := strings.SplitN(v, ".", 2) if len(parts) == 0 { log.Printf("E! 
Error parsing %s to timestamp: %s", v, err) @@ -299,21 +299,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { ts = ts.Add(time.Duration(nanosec) * time.Nanosecond) } timestamp = ts - case EPOCH_MILLI: + case EpochMilli: ms, err := strconv.ParseInt(v, 10, 64) if err != nil { log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, ms*int64(time.Millisecond)) } - case EPOCH_NANO: + case EpochNano: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, iv) } - case SYSLOG_TIMESTAMP: + case SyslogTimestamp: ts, err := time.ParseInLocation(time.Stamp, v, p.loc) if err == nil { if ts.Year() == 0 { @@ -323,7 +323,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } else { log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) } - case GENERIC_TIMESTAMP: + case GenericTimestamp: var foundTs bool // first try timestamp layouts that we've already found for _, layout := range p.foundTsLayouts { @@ -353,7 +353,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { log.Printf("E! Error parsing timestamp [%s], could not find any "+ "suitable time layouts.", v) } - case DROP: + case Drop: // goodbye! default: v = strings.Replace(v, ",", ".", -1) @@ -370,14 +370,13 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if p.UniqueTimestamp != "auto" { - return metric.New(p.Measurement, tags, fields, timestamp) + return metric.New(p.Measurement, tags, fields, timestamp), nil } - return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) + return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)), nil } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) scanner := bufio.NewScanner(bytes.NewReader(buf)) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 1c409e8a542b6..d51f30385a964 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1013,7 +1013,6 @@ func TestSyslogTimestamp(t *testing.T) { } func TestReplaceTimestampComma(t *testing.T) { - p := &Parser{ Patterns: []string{`%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05.000"} successfulMatches=%{NUMBER:value:int}`}, } diff --git a/plugins/parsers/influx/README.md b/plugins/parsers/influx/README.md index 51c0106e623f3..874bb279d5a77 100644 --- a/plugins/parsers/influx/README.md +++ b/plugins/parsers/influx/README.md @@ -3,7 +3,7 @@ There are no additional configuration options for InfluxDB [line protocol][]. The metrics are parsed directly into Telegraf metrics. 
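For quick reference, one illustrative line (not taken from this diff) showing the measurement, tag, field, and timestamp sections of the format:

```text
weather,location=us-midwest temperature=82,humidity=71i 1465839830100400200
```

This parses to measurement `weather`, tag `location=us-midwest`, a float field `temperature` and an integer field `humidity`, with the trailing value taken as the timestamp in nanoseconds.
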
-[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line/ +[line protocol]: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/ ### Configuration diff --git a/plugins/parsers/influx/escape.go b/plugins/parsers/influx/escape.go index 01e42a8d51cb5..211963d8abc35 100644 --- a/plugins/parsers/influx/escape.go +++ b/plugins/parsers/influx/escape.go @@ -36,25 +36,22 @@ var ( func unescape(b []byte) string { if bytes.ContainsAny(b, escapes) { return unescaper.Replace(unsafeBytesToString(b)) - } else { - return string(b) } + return string(b) } func nameUnescape(b []byte) string { if bytes.ContainsAny(b, nameEscapes) { return nameUnescaper.Replace(unsafeBytesToString(b)) - } else { - return string(b) } + return string(b) } func stringFieldUnescape(b []byte) string { if bytes.ContainsAny(b, stringFieldEscapes) { return stringFieldUnescaper.Replace(unsafeBytesToString(b)) - } else { - return string(b) } + return string(b) } // parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index ae08d5a7c0870..7d1a3af3e12a1 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -12,7 +12,6 @@ import ( // MetricHandler implements the Handler interface and produces telegraf.Metric. type MetricHandler struct { - err error timePrecision time.Duration timeFunc TimeFunc metric telegraf.Metric @@ -47,10 +46,9 @@ func (h *MetricHandler) Metric() (telegraf.Metric, error) { } func (h *MetricHandler) SetMeasurement(name []byte) error { - var err error - h.metric, err = metric.New(nameUnescape(name), + h.metric = metric.New(nameUnescape(name), nil, nil, time.Time{}) - return err + return nil } func (h *MetricHandler) AddTag(key []byte, value []byte) error { diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 332b73592486e..4bbf8c079476b 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -1,4 +1,3 @@ - //line plugins/parsers/influx/machine.go.rl:1 package influx @@ -16,29 +15,25 @@ func (e *readErr) Error() string { } var ( - ErrNameParse = errors.New("expected measurement name") - ErrFieldParse = errors.New("expected field") - ErrTagParse = errors.New("expected tag") + ErrNameParse = errors.New("expected measurement name") + ErrFieldParse = errors.New("expected field") + ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") - ErrParse = errors.New("parse error") - EOF = errors.New("EOF") + ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) - //line plugins/parsers/influx/machine.go.rl:318 - - //line plugins/parsers/influx/machine.go:33 -const LineProtocol_start int = 269 -const LineProtocol_first_final int = 269 +const LineProtocol_start int = 46 +const LineProtocol_first_final int = 46 const LineProtocol_error int = 0 -const LineProtocol_en_main int = 269 -const LineProtocol_en_discard_line int = 257 -const LineProtocol_en_align int = 739 -const LineProtocol_en_series int = 260 - +const LineProtocol_en_main int = 46 +const LineProtocol_en_discard_line int = 34 +const LineProtocol_en_align int = 85 +const LineProtocol_en_series int = 37 //line plugins/parsers/influx/machine.go.rl:321 @@ -69,26 +64,25 @@ type machine struct { func NewMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_align, } - //line plugins/parsers/influx/machine.go.rl:354 - + //line 
plugins/parsers/influx/machine.go.rl:355 - + //line plugins/parsers/influx/machine.go.rl:356 - + //line plugins/parsers/influx/machine.go.rl:357 - + //line plugins/parsers/influx/machine.go.rl:358 - + //line plugins/parsers/influx/machine.go.rl:359 - + //line plugins/parsers/influx/machine.go:90 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:360 @@ -98,24 +92,23 @@ func NewMachine(handler Handler) *machine { func NewSeriesMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_series, } - //line plugins/parsers/influx/machine.go.rl:371 - + //line plugins/parsers/influx/machine.go.rl:372 - + //line plugins/parsers/influx/machine.go.rl:373 - + //line plugins/parsers/influx/machine.go.rl:374 - + //line plugins/parsers/influx/machine.go.rl:375 - + //line plugins/parsers/influx/machine.go:117 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:376 @@ -135,10 +128,9 @@ func (m *machine) SetData(data []byte) { m.beginMetric = false m.finishMetric = false - //line plugins/parsers/influx/machine.go:140 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:393 @@ -163,29641 +155,2733 @@ func (m *machine) Next() error { func (m *machine) exec() error { var err error - + //line plugins/parsers/influx/machine.go:168 { - if ( m.p) == ( m.pe) { - goto _test_eof - } - goto _resume + if (m.p) == (m.pe) { + goto _test_eof + } + goto _resume -_again: - switch ( m.cs) { - case 269: - goto st269 - case 1: - goto st1 - case 2: - goto st2 - case 3: - goto st3 - case 0: - goto st0 - case 4: - goto st4 - case 5: - goto st5 - case 6: - goto st6 - case 270: - goto st270 - case 271: - goto st271 - case 272: - goto st272 - case 7: - goto st7 - case 8: - goto st8 - case 9: - goto st9 - case 10: - goto st10 - case 11: - goto st11 - case 12: - goto st12 - case 13: - goto st13 - case 14: - goto st14 - case 15: - goto st15 - case 16: - goto st16 - case 17: - goto st17 - case 18: - goto st18 - case 19: - goto st19 - case 20: - goto st20 - case 21: - goto st21 - case 22: - goto st22 - case 23: - goto st23 - case 24: - goto st24 - case 25: - goto st25 - case 26: - goto st26 - case 27: - goto st27 - case 28: - goto st28 - case 29: - goto st29 - case 30: - goto st30 - case 31: - goto st31 - case 273: - goto st273 - case 274: - goto st274 - case 32: - goto st32 - case 33: - goto st33 - case 275: - goto st275 - case 276: - goto st276 - case 277: - goto st277 - case 34: - goto st34 - case 278: - goto st278 - case 279: - goto st279 - case 280: - goto st280 - case 281: - goto st281 - case 282: - goto st282 - case 283: - goto st283 - case 284: - goto st284 - case 285: - goto st285 - case 286: - goto st286 - case 287: - goto st287 - case 288: - goto st288 - case 289: - goto st289 - case 290: - goto st290 - case 291: - goto st291 - case 292: - goto st292 - case 293: - goto st293 - case 294: - goto st294 - case 295: - goto st295 - case 35: - goto st35 - case 36: - goto st36 - case 296: - goto st296 - case 297: - goto st297 - case 298: - goto st298 - case 37: - goto st37 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 299: - goto st299 - case 300: - goto st300 - case 301: - goto st301 - case 302: - goto st302 - case 42: - goto st42 - case 303: - goto st303 - case 304: - goto st304 - case 305: - goto st305 - case 306: - goto st306 - case 307: - goto st307 - 
case 308: - goto st308 - case 309: - goto st309 - case 310: - goto st310 - case 311: - goto st311 - case 312: - goto st312 - case 313: - goto st313 - case 314: - goto st314 - case 315: - goto st315 - case 316: - goto st316 - case 317: - goto st317 - case 318: - goto st318 - case 319: - goto st319 - case 320: - goto st320 - case 321: - goto st321 - case 322: - goto st322 - case 323: - goto st323 - case 324: - goto st324 - case 43: - goto st43 - case 44: - goto st44 - case 45: - goto st45 - case 46: - goto st46 - case 47: - goto st47 - case 48: - goto st48 - case 49: - goto st49 - case 50: - goto st50 - case 51: - goto st51 - case 52: - goto st52 - case 325: - goto st325 - case 326: - goto st326 - case 327: - goto st327 - case 53: - goto st53 - case 54: - goto st54 - case 55: - goto st55 - case 56: - goto st56 - case 57: - goto st57 - case 58: - goto st58 - case 328: - goto st328 - case 329: - goto st329 - case 59: - goto st59 - case 330: - goto st330 - case 331: - goto st331 - case 332: - goto st332 - case 333: - goto st333 - case 334: - goto st334 - case 335: - goto st335 - case 336: - goto st336 - case 337: - goto st337 - case 338: - goto st338 - case 339: - goto st339 - case 340: - goto st340 - case 341: - goto st341 - case 342: - goto st342 - case 343: - goto st343 - case 344: - goto st344 - case 345: - goto st345 - case 346: - goto st346 - case 347: - goto st347 - case 348: - goto st348 - case 349: - goto st349 - case 60: - goto st60 - case 350: - goto st350 - case 351: - goto st351 - case 352: - goto st352 - case 61: - goto st61 - case 353: - goto st353 - case 354: - goto st354 - case 355: - goto st355 - case 356: - goto st356 - case 357: - goto st357 - case 358: - goto st358 - case 359: - goto st359 - case 360: - goto st360 - case 361: - goto st361 - case 362: - goto st362 - case 363: - goto st363 - case 364: - goto st364 - case 365: - goto st365 - case 366: - goto st366 - case 367: - goto st367 - case 368: - goto st368 - case 369: - goto st369 - case 370: - goto st370 - case 371: - goto st371 - case 372: - goto st372 - case 62: - goto st62 - case 63: - goto st63 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 - case 373: - goto st373 - case 67: - goto st67 - case 68: - goto st68 - case 69: - goto st69 - case 70: - goto st70 - case 71: - goto st71 - case 374: - goto st374 - case 375: - goto st375 - case 376: - goto st376 - case 72: - goto st72 - case 73: - goto st73 - case 74: - goto st74 - case 377: - goto st377 - case 378: - goto st378 - case 379: - goto st379 - case 75: - goto st75 - case 380: - goto st380 - case 381: - goto st381 - case 382: - goto st382 - case 383: - goto st383 - case 384: - goto st384 - case 385: - goto st385 - case 386: - goto st386 - case 387: - goto st387 - case 388: - goto st388 - case 389: - goto st389 - case 390: - goto st390 - case 391: - goto st391 - case 392: - goto st392 - case 393: - goto st393 - case 394: - goto st394 - case 395: - goto st395 - case 396: - goto st396 - case 397: - goto st397 - case 398: - goto st398 - case 399: - goto st399 - case 76: - goto st76 - case 77: - goto st77 - case 78: - goto st78 - case 79: - goto st79 - case 80: - goto st80 - case 81: - goto st81 - case 82: - goto st82 - case 83: - goto st83 - case 84: - goto st84 - case 85: - goto st85 - case 86: - goto st86 - case 87: - goto st87 - case 88: - goto st88 - case 89: - goto st89 - case 400: - goto st400 - case 401: - goto st401 - case 402: - goto st402 - case 403: - goto st403 - case 90: - goto st90 - case 91: - goto st91 - case 92: - goto st92 - case 
93: - goto st93 - case 404: - goto st404 - case 405: - goto st405 - case 94: - goto st94 - case 95: - goto st95 - case 406: - goto st406 - case 96: - goto st96 - case 97: - goto st97 - case 407: - goto st407 - case 408: - goto st408 - case 98: - goto st98 - case 409: - goto st409 - case 410: - goto st410 - case 99: - goto st99 - case 100: - goto st100 - case 411: - goto st411 - case 412: - goto st412 - case 413: - goto st413 - case 414: - goto st414 - case 415: - goto st415 - case 416: - goto st416 - case 417: - goto st417 - case 418: - goto st418 - case 419: - goto st419 - case 420: - goto st420 - case 421: - goto st421 - case 422: - goto st422 - case 423: - goto st423 - case 424: - goto st424 - case 425: - goto st425 - case 426: - goto st426 - case 427: - goto st427 - case 428: - goto st428 - case 101: - goto st101 - case 429: - goto st429 - case 430: - goto st430 - case 431: - goto st431 - case 102: - goto st102 - case 103: - goto st103 - case 432: - goto st432 - case 433: - goto st433 - case 434: - goto st434 - case 104: - goto st104 - case 435: - goto st435 - case 436: - goto st436 - case 437: - goto st437 - case 438: - goto st438 - case 439: - goto st439 - case 440: - goto st440 - case 441: - goto st441 - case 442: - goto st442 - case 443: - goto st443 - case 444: - goto st444 - case 445: - goto st445 - case 446: - goto st446 - case 447: - goto st447 - case 448: - goto st448 - case 449: - goto st449 - case 450: - goto st450 - case 451: - goto st451 - case 452: - goto st452 - case 453: - goto st453 - case 454: - goto st454 - case 105: - goto st105 - case 455: - goto st455 - case 456: - goto st456 - case 457: - goto st457 - case 458: - goto st458 - case 459: - goto st459 - case 460: - goto st460 - case 461: - goto st461 - case 462: - goto st462 - case 463: - goto st463 - case 464: - goto st464 - case 465: - goto st465 - case 466: - goto st466 - case 467: - goto st467 - case 468: - goto st468 - case 469: - goto st469 - case 470: - goto st470 - case 471: - goto st471 - case 472: - goto st472 - case 473: - goto st473 - case 474: - goto st474 - case 475: - goto st475 - case 476: - goto st476 - case 106: - goto st106 - case 107: - goto st107 - case 108: - goto st108 - case 109: - goto st109 - case 110: - goto st110 - case 477: - goto st477 - case 111: - goto st111 - case 478: - goto st478 - case 479: - goto st479 - case 112: - goto st112 - case 480: - goto st480 - case 481: - goto st481 - case 482: - goto st482 - case 483: - goto st483 - case 484: - goto st484 - case 485: - goto st485 - case 486: - goto st486 - case 487: - goto st487 - case 488: - goto st488 - case 113: - goto st113 - case 114: - goto st114 - case 115: - goto st115 - case 489: - goto st489 - case 116: - goto st116 - case 117: - goto st117 - case 118: - goto st118 - case 490: - goto st490 - case 119: - goto st119 - case 120: - goto st120 - case 491: - goto st491 - case 492: - goto st492 - case 121: - goto st121 - case 122: - goto st122 - case 123: - goto st123 - case 124: - goto st124 - case 493: - goto st493 - case 494: - goto st494 - case 495: - goto st495 - case 125: - goto st125 - case 496: - goto st496 - case 497: - goto st497 - case 498: - goto st498 - case 499: - goto st499 - case 500: - goto st500 - case 501: - goto st501 - case 502: - goto st502 - case 503: - goto st503 - case 504: - goto st504 - case 505: - goto st505 - case 506: - goto st506 - case 507: - goto st507 - case 508: - goto st508 - case 509: - goto st509 - case 510: - goto st510 - case 511: - goto st511 - case 512: - goto st512 - case 513: - goto st513 
- case 514: - goto st514 - case 515: - goto st515 - case 126: - goto st126 - case 127: - goto st127 - case 516: - goto st516 - case 517: - goto st517 - case 518: - goto st518 - case 519: - goto st519 - case 520: - goto st520 - case 521: - goto st521 - case 522: - goto st522 - case 523: - goto st523 - case 524: - goto st524 - case 128: - goto st128 - case 129: - goto st129 - case 130: - goto st130 - case 525: - goto st525 - case 131: - goto st131 - case 132: - goto st132 - case 133: - goto st133 - case 526: - goto st526 - case 134: - goto st134 - case 135: - goto st135 - case 527: - goto st527 - case 528: - goto st528 - case 136: - goto st136 - case 137: - goto st137 - case 138: - goto st138 - case 529: - goto st529 - case 530: - goto st530 - case 139: - goto st139 - case 531: - goto st531 - case 140: - goto st140 - case 532: - goto st532 - case 533: - goto st533 - case 534: - goto st534 - case 535: - goto st535 - case 536: - goto st536 - case 537: - goto st537 - case 538: - goto st538 - case 539: - goto st539 - case 141: - goto st141 - case 142: - goto st142 - case 143: - goto st143 - case 540: - goto st540 - case 144: - goto st144 - case 145: - goto st145 - case 146: - goto st146 - case 541: - goto st541 - case 147: - goto st147 - case 148: - goto st148 - case 542: - goto st542 - case 543: - goto st543 - case 544: - goto st544 - case 545: - goto st545 - case 546: - goto st546 - case 547: - goto st547 - case 548: - goto st548 - case 549: - goto st549 - case 550: - goto st550 - case 551: - goto st551 - case 552: - goto st552 - case 553: - goto st553 - case 554: - goto st554 - case 555: - goto st555 - case 556: - goto st556 - case 557: - goto st557 - case 558: - goto st558 - case 559: - goto st559 - case 560: - goto st560 - case 561: - goto st561 - case 149: - goto st149 - case 150: - goto st150 - case 562: - goto st562 - case 563: - goto st563 - case 564: - goto st564 - case 151: - goto st151 - case 565: - goto st565 - case 566: - goto st566 - case 152: - goto st152 - case 567: - goto st567 - case 568: - goto st568 - case 569: - goto st569 - case 570: - goto st570 - case 571: - goto st571 - case 572: - goto st572 - case 573: - goto st573 - case 574: - goto st574 - case 575: - goto st575 - case 576: - goto st576 - case 577: - goto st577 - case 578: - goto st578 - case 579: - goto st579 - case 580: - goto st580 - case 581: - goto st581 - case 582: - goto st582 - case 583: - goto st583 - case 584: - goto st584 - case 153: - goto st153 - case 154: - goto st154 - case 585: - goto st585 - case 155: - goto st155 - case 586: - goto st586 - case 587: - goto st587 - case 588: - goto st588 - case 589: - goto st589 - case 590: - goto st590 - case 591: - goto st591 - case 592: - goto st592 - case 593: - goto st593 - case 156: - goto st156 - case 157: - goto st157 - case 158: - goto st158 - case 594: - goto st594 - case 159: - goto st159 - case 160: - goto st160 - case 161: - goto st161 - case 595: - goto st595 - case 162: - goto st162 - case 163: - goto st163 - case 596: - goto st596 - case 597: - goto st597 - case 164: - goto st164 - case 165: - goto st165 - case 166: - goto st166 - case 167: - goto st167 - case 168: - goto st168 - case 169: - goto st169 - case 598: - goto st598 - case 599: - goto st599 - case 600: - goto st600 - case 601: - goto st601 - case 602: - goto st602 - case 603: - goto st603 - case 604: - goto st604 - case 605: - goto st605 - case 606: - goto st606 - case 607: - goto st607 - case 608: - goto st608 - case 609: - goto st609 - case 610: - goto st610 - case 611: - goto st611 - 
case 612: - goto st612 - case 613: - goto st613 - case 614: - goto st614 - case 615: - goto st615 - case 616: - goto st616 - case 170: - goto st170 - case 171: - goto st171 - case 172: - goto st172 - case 617: - goto st617 - case 618: - goto st618 - case 619: - goto st619 - case 173: - goto st173 - case 620: - goto st620 - case 621: - goto st621 - case 174: - goto st174 - case 622: - goto st622 - case 623: - goto st623 - case 624: - goto st624 - case 625: - goto st625 - case 626: - goto st626 - case 175: - goto st175 - case 176: - goto st176 - case 177: - goto st177 - case 627: - goto st627 - case 178: - goto st178 - case 179: - goto st179 - case 180: - goto st180 - case 628: - goto st628 - case 181: - goto st181 - case 182: - goto st182 - case 629: - goto st629 - case 630: - goto st630 - case 183: - goto st183 - case 631: - goto st631 - case 632: - goto st632 - case 633: - goto st633 - case 184: - goto st184 - case 185: - goto st185 - case 186: - goto st186 - case 634: - goto st634 - case 187: - goto st187 - case 188: - goto st188 - case 189: - goto st189 - case 635: - goto st635 - case 190: - goto st190 - case 191: - goto st191 - case 636: - goto st636 - case 637: - goto st637 - case 192: - goto st192 - case 193: - goto st193 - case 194: - goto st194 - case 638: - goto st638 - case 195: - goto st195 - case 196: - goto st196 - case 639: - goto st639 - case 640: - goto st640 - case 641: - goto st641 - case 642: - goto st642 - case 643: - goto st643 - case 644: - goto st644 - case 645: - goto st645 - case 646: - goto st646 - case 197: - goto st197 - case 198: - goto st198 - case 199: - goto st199 - case 647: - goto st647 - case 200: - goto st200 - case 201: - goto st201 - case 202: - goto st202 - case 648: - goto st648 - case 203: - goto st203 - case 204: - goto st204 - case 649: - goto st649 - case 650: - goto st650 - case 205: - goto st205 - case 206: - goto st206 - case 207: - goto st207 - case 651: - goto st651 - case 652: - goto st652 - case 653: - goto st653 - case 654: - goto st654 - case 655: - goto st655 - case 656: - goto st656 - case 657: - goto st657 - case 658: - goto st658 - case 659: - goto st659 - case 660: - goto st660 - case 661: - goto st661 - case 662: - goto st662 - case 663: - goto st663 - case 664: - goto st664 - case 665: - goto st665 - case 666: - goto st666 - case 667: - goto st667 - case 668: - goto st668 - case 669: - goto st669 - case 208: - goto st208 - case 209: - goto st209 - case 210: - goto st210 - case 211: - goto st211 - case 212: - goto st212 - case 670: - goto st670 - case 213: - goto st213 - case 214: - goto st214 - case 671: - goto st671 - case 672: - goto st672 - case 673: - goto st673 - case 674: - goto st674 - case 675: - goto st675 - case 676: - goto st676 - case 677: - goto st677 - case 678: - goto st678 - case 679: - goto st679 - case 215: - goto st215 - case 216: - goto st216 - case 217: - goto st217 - case 680: - goto st680 - case 218: - goto st218 - case 219: - goto st219 - case 220: - goto st220 - case 681: - goto st681 - case 221: - goto st221 - case 222: - goto st222 - case 682: - goto st682 - case 683: - goto st683 - case 223: - goto st223 - case 224: - goto st224 - case 225: - goto st225 - case 684: - goto st684 - case 226: - goto st226 - case 227: - goto st227 - case 685: - goto st685 - case 686: - goto st686 - case 687: - goto st687 - case 688: - goto st688 - case 689: - goto st689 - case 690: - goto st690 - case 691: - goto st691 - case 692: - goto st692 - case 228: - goto st228 - case 229: - goto st229 - case 230: - goto st230 - case 
693: - goto st693 - case 231: - goto st231 - case 232: - goto st232 - case 694: - goto st694 - case 695: - goto st695 - case 696: - goto st696 - case 697: - goto st697 - case 698: - goto st698 - case 699: - goto st699 - case 700: - goto st700 - case 701: - goto st701 - case 233: - goto st233 - case 234: - goto st234 - case 235: - goto st235 - case 702: - goto st702 - case 236: - goto st236 - case 237: - goto st237 - case 238: - goto st238 - case 703: - goto st703 - case 239: - goto st239 - case 240: - goto st240 - case 704: - goto st704 - case 705: - goto st705 - case 241: - goto st241 - case 242: - goto st242 - case 243: - goto st243 - case 706: - goto st706 - case 707: - goto st707 - case 708: - goto st708 - case 709: - goto st709 - case 710: - goto st710 - case 711: - goto st711 - case 712: - goto st712 - case 713: - goto st713 - case 714: - goto st714 - case 715: - goto st715 - case 716: - goto st716 - case 717: - goto st717 - case 718: - goto st718 - case 719: - goto st719 - case 720: - goto st720 - case 721: - goto st721 - case 722: - goto st722 - case 723: - goto st723 - case 724: - goto st724 - case 244: - goto st244 - case 245: - goto st245 - case 725: - goto st725 - case 246: - goto st246 - case 247: - goto st247 - case 726: - goto st726 - case 727: - goto st727 - case 728: - goto st728 - case 729: - goto st729 - case 730: - goto st730 - case 731: - goto st731 - case 732: - goto st732 - case 733: - goto st733 - case 248: - goto st248 - case 249: - goto st249 - case 250: - goto st250 - case 734: - goto st734 - case 251: - goto st251 - case 252: - goto st252 - case 253: - goto st253 - case 735: - goto st735 - case 254: - goto st254 - case 255: - goto st255 - case 736: - goto st736 - case 737: - goto st737 - case 256: - goto st256 - case 257: - goto st257 - case 738: - goto st738 - case 260: - goto st260 - case 740: - goto st740 - case 741: - goto st741 - case 261: - goto st261 - case 262: - goto st262 - case 263: - goto st263 - case 264: - goto st264 - case 742: - goto st742 - case 265: - goto st265 - case 743: - goto st743 - case 266: - goto st266 - case 267: - goto st267 - case 268: - goto st268 - case 739: - goto st739 - case 258: - goto st258 - case 259: - goto st259 - } + _again: + switch m.cs { + case 46: + goto st46 + case 1: + goto st1 + case 2: + goto st2 + case 3: + goto st3 + case 0: + goto st0 + case 4: + goto st4 + case 5: + goto st5 + case 6: + goto st6 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 + case 7: + goto st7 + case 8: + goto st8 + case 9: + goto st9 + case 10: + goto st10 + case 50: + goto st50 + case 51: + goto st51 + case 52: + goto st52 + case 53: + goto st53 + case 54: + goto st54 + case 55: + goto st55 + case 56: + goto st56 + case 57: + goto st57 + case 58: + goto st58 + case 59: + goto st59 + case 60: + goto st60 + case 61: + goto st61 + case 62: + goto st62 + case 63: + goto st63 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 + case 67: + goto st67 + case 68: + goto st68 + case 69: + goto st69 + case 11: + goto st11 + case 12: + goto st12 + case 13: + goto st13 + case 14: + goto st14 + case 15: + goto st15 + case 70: + goto st70 + case 16: + goto st16 + case 17: + goto st17 + case 71: + goto st71 + case 72: + goto st72 + case 73: + goto st73 + case 74: + goto st74 + case 75: + goto st75 + case 76: + goto st76 + case 77: + goto st77 + case 78: + goto st78 + case 79: + goto st79 + case 18: + goto st18 + case 19: + goto st19 + case 20: + goto st20 + case 80: + goto st80 + case 21: + goto st21 + case 22: + 
goto st22 + case 23: + goto st23 + case 81: + goto st81 + case 24: + goto st24 + case 25: + goto st25 + case 82: + goto st82 + case 83: + goto st83 + case 26: + goto st26 + case 27: + goto st27 + case 28: + goto st28 + case 29: + goto st29 + case 30: + goto st30 + case 31: + goto st31 + case 32: + goto st32 + case 33: + goto st33 + case 34: + goto st34 + case 84: + goto st84 + case 37: + goto st37 + case 86: + goto st86 + case 87: + goto st87 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 + case 88: + goto st88 + case 42: + goto st42 + case 89: + goto st89 + case 43: + goto st43 + case 44: + goto st44 + case 45: + goto st45 + case 85: + goto st85 + case 35: + goto st35 + case 36: + goto st36 + } - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof - } -_resume: - switch ( m.cs) { - case 269: - goto st_case_269 - case 1: - goto st_case_1 - case 2: - goto st_case_2 - case 3: - goto st_case_3 - case 0: - goto st_case_0 - case 4: - goto st_case_4 - case 5: - goto st_case_5 - case 6: - goto st_case_6 - case 270: - goto st_case_270 - case 271: - goto st_case_271 - case 272: - goto st_case_272 - case 7: - goto st_case_7 - case 8: - goto st_case_8 - case 9: - goto st_case_9 - case 10: - goto st_case_10 - case 11: - goto st_case_11 - case 12: - goto st_case_12 - case 13: - goto st_case_13 - case 14: - goto st_case_14 - case 15: - goto st_case_15 - case 16: - goto st_case_16 - case 17: - goto st_case_17 - case 18: - goto st_case_18 - case 19: - goto st_case_19 - case 20: - goto st_case_20 - case 21: - goto st_case_21 - case 22: - goto st_case_22 - case 23: - goto st_case_23 - case 24: - goto st_case_24 - case 25: - goto st_case_25 - case 26: - goto st_case_26 - case 27: - goto st_case_27 - case 28: - goto st_case_28 - case 29: - goto st_case_29 - case 30: - goto st_case_30 - case 31: - goto st_case_31 - case 273: - goto st_case_273 - case 274: - goto st_case_274 - case 32: - goto st_case_32 - case 33: - goto st_case_33 - case 275: - goto st_case_275 - case 276: - goto st_case_276 - case 277: - goto st_case_277 - case 34: - goto st_case_34 - case 278: - goto st_case_278 - case 279: - goto st_case_279 - case 280: - goto st_case_280 - case 281: - goto st_case_281 - case 282: - goto st_case_282 - case 283: - goto st_case_283 - case 284: - goto st_case_284 - case 285: - goto st_case_285 - case 286: - goto st_case_286 - case 287: - goto st_case_287 - case 288: - goto st_case_288 - case 289: - goto st_case_289 - case 290: - goto st_case_290 - case 291: - goto st_case_291 - case 292: - goto st_case_292 - case 293: - goto st_case_293 - case 294: - goto st_case_294 - case 295: - goto st_case_295 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - case 296: - goto st_case_296 - case 297: - goto st_case_297 - case 298: - goto st_case_298 - case 37: - goto st_case_37 - case 38: - goto st_case_38 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 299: - goto st_case_299 - case 300: - goto st_case_300 - case 301: - goto st_case_301 - case 302: - goto st_case_302 - case 42: - goto st_case_42 - case 303: - goto st_case_303 - case 304: - goto st_case_304 - case 305: - goto st_case_305 - case 306: - goto st_case_306 - case 307: - goto st_case_307 - case 308: - goto st_case_308 - case 309: - goto st_case_309 - case 310: - goto st_case_310 - case 311: - goto st_case_311 - case 312: - goto st_case_312 - case 313: - goto st_case_313 - case 314: - goto st_case_314 - case 315: - goto st_case_315 - case 316: - goto st_case_316 - 
case 317: - goto st_case_317 - case 318: - goto st_case_318 - case 319: - goto st_case_319 - case 320: - goto st_case_320 - case 321: - goto st_case_321 - case 322: - goto st_case_322 - case 323: - goto st_case_323 - case 324: - goto st_case_324 - case 43: - goto st_case_43 - case 44: - goto st_case_44 - case 45: - goto st_case_45 - case 46: - goto st_case_46 - case 47: - goto st_case_47 - case 48: - goto st_case_48 - case 49: - goto st_case_49 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 325: - goto st_case_325 - case 326: - goto st_case_326 - case 327: - goto st_case_327 - case 53: - goto st_case_53 - case 54: - goto st_case_54 - case 55: - goto st_case_55 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 - case 328: - goto st_case_328 - case 329: - goto st_case_329 - case 59: - goto st_case_59 - case 330: - goto st_case_330 - case 331: - goto st_case_331 - case 332: - goto st_case_332 - case 333: - goto st_case_333 - case 334: - goto st_case_334 - case 335: - goto st_case_335 - case 336: - goto st_case_336 - case 337: - goto st_case_337 - case 338: - goto st_case_338 - case 339: - goto st_case_339 - case 340: - goto st_case_340 - case 341: - goto st_case_341 - case 342: - goto st_case_342 - case 343: - goto st_case_343 - case 344: - goto st_case_344 - case 345: - goto st_case_345 - case 346: - goto st_case_346 - case 347: - goto st_case_347 - case 348: - goto st_case_348 - case 349: - goto st_case_349 - case 60: - goto st_case_60 - case 350: - goto st_case_350 - case 351: - goto st_case_351 - case 352: - goto st_case_352 - case 61: - goto st_case_61 - case 353: - goto st_case_353 - case 354: - goto st_case_354 - case 355: - goto st_case_355 - case 356: - goto st_case_356 - case 357: - goto st_case_357 - case 358: - goto st_case_358 - case 359: - goto st_case_359 - case 360: - goto st_case_360 - case 361: - goto st_case_361 - case 362: - goto st_case_362 - case 363: - goto st_case_363 - case 364: - goto st_case_364 - case 365: - goto st_case_365 - case 366: - goto st_case_366 - case 367: - goto st_case_367 - case 368: - goto st_case_368 - case 369: - goto st_case_369 - case 370: - goto st_case_370 - case 371: - goto st_case_371 - case 372: - goto st_case_372 - case 62: - goto st_case_62 - case 63: - goto st_case_63 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 - case 373: - goto st_case_373 - case 67: - goto st_case_67 - case 68: - goto st_case_68 - case 69: - goto st_case_69 - case 70: - goto st_case_70 - case 71: - goto st_case_71 - case 374: - goto st_case_374 - case 375: - goto st_case_375 - case 376: - goto st_case_376 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 - case 377: - goto st_case_377 - case 378: - goto st_case_378 - case 379: - goto st_case_379 - case 75: - goto st_case_75 - case 380: - goto st_case_380 - case 381: - goto st_case_381 - case 382: - goto st_case_382 - case 383: - goto st_case_383 - case 384: - goto st_case_384 - case 385: - goto st_case_385 - case 386: - goto st_case_386 - case 387: - goto st_case_387 - case 388: - goto st_case_388 - case 389: - goto st_case_389 - case 390: - goto st_case_390 - case 391: - goto st_case_391 - case 392: - goto st_case_392 - case 393: - goto st_case_393 - case 394: - goto st_case_394 - case 395: - goto st_case_395 - case 396: - goto st_case_396 - case 397: - goto st_case_397 - case 398: - goto st_case_398 - case 399: - goto st_case_399 - case 76: - goto 
st_case_76 - case 77: - goto st_case_77 - case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 80: - goto st_case_80 - case 81: - goto st_case_81 - case 82: - goto st_case_82 - case 83: - goto st_case_83 - case 84: - goto st_case_84 - case 85: - goto st_case_85 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 88: - goto st_case_88 - case 89: - goto st_case_89 - case 400: - goto st_case_400 - case 401: - goto st_case_401 - case 402: - goto st_case_402 - case 403: - goto st_case_403 - case 90: - goto st_case_90 - case 91: - goto st_case_91 - case 92: - goto st_case_92 - case 93: - goto st_case_93 - case 404: - goto st_case_404 - case 405: - goto st_case_405 - case 94: - goto st_case_94 - case 95: - goto st_case_95 - case 406: - goto st_case_406 - case 96: - goto st_case_96 - case 97: - goto st_case_97 - case 407: - goto st_case_407 - case 408: - goto st_case_408 - case 98: - goto st_case_98 - case 409: - goto st_case_409 - case 410: - goto st_case_410 - case 99: - goto st_case_99 - case 100: - goto st_case_100 - case 411: - goto st_case_411 - case 412: - goto st_case_412 - case 413: - goto st_case_413 - case 414: - goto st_case_414 - case 415: - goto st_case_415 - case 416: - goto st_case_416 - case 417: - goto st_case_417 - case 418: - goto st_case_418 - case 419: - goto st_case_419 - case 420: - goto st_case_420 - case 421: - goto st_case_421 - case 422: - goto st_case_422 - case 423: - goto st_case_423 - case 424: - goto st_case_424 - case 425: - goto st_case_425 - case 426: - goto st_case_426 - case 427: - goto st_case_427 - case 428: - goto st_case_428 - case 101: - goto st_case_101 - case 429: - goto st_case_429 - case 430: - goto st_case_430 - case 431: - goto st_case_431 - case 102: - goto st_case_102 - case 103: - goto st_case_103 - case 432: - goto st_case_432 - case 433: - goto st_case_433 - case 434: - goto st_case_434 - case 104: - goto st_case_104 - case 435: - goto st_case_435 - case 436: - goto st_case_436 - case 437: - goto st_case_437 - case 438: - goto st_case_438 - case 439: - goto st_case_439 - case 440: - goto st_case_440 - case 441: - goto st_case_441 - case 442: - goto st_case_442 - case 443: - goto st_case_443 - case 444: - goto st_case_444 - case 445: - goto st_case_445 - case 446: - goto st_case_446 - case 447: - goto st_case_447 - case 448: - goto st_case_448 - case 449: - goto st_case_449 - case 450: - goto st_case_450 - case 451: - goto st_case_451 - case 452: - goto st_case_452 - case 453: - goto st_case_453 - case 454: - goto st_case_454 - case 105: - goto st_case_105 - case 455: - goto st_case_455 - case 456: - goto st_case_456 - case 457: - goto st_case_457 - case 458: - goto st_case_458 - case 459: - goto st_case_459 - case 460: - goto st_case_460 - case 461: - goto st_case_461 - case 462: - goto st_case_462 - case 463: - goto st_case_463 - case 464: - goto st_case_464 - case 465: - goto st_case_465 - case 466: - goto st_case_466 - case 467: - goto st_case_467 - case 468: - goto st_case_468 - case 469: - goto st_case_469 - case 470: - goto st_case_470 - case 471: - goto st_case_471 - case 472: - goto st_case_472 - case 473: - goto st_case_473 - case 474: - goto st_case_474 - case 475: - goto st_case_475 - case 476: - goto st_case_476 - case 106: - goto st_case_106 - case 107: - goto st_case_107 - case 108: - goto st_case_108 - case 109: - goto st_case_109 - case 110: - goto st_case_110 - case 477: - goto st_case_477 - case 111: - goto st_case_111 - case 478: - goto st_case_478 - case 479: - goto st_case_479 - case 112: - goto 
st_case_112 - case 480: - goto st_case_480 - case 481: - goto st_case_481 - case 482: - goto st_case_482 - case 483: - goto st_case_483 - case 484: - goto st_case_484 - case 485: - goto st_case_485 - case 486: - goto st_case_486 - case 487: - goto st_case_487 - case 488: - goto st_case_488 - case 113: - goto st_case_113 - case 114: - goto st_case_114 - case 115: - goto st_case_115 - case 489: - goto st_case_489 - case 116: - goto st_case_116 - case 117: - goto st_case_117 - case 118: - goto st_case_118 - case 490: - goto st_case_490 - case 119: - goto st_case_119 - case 120: - goto st_case_120 - case 491: - goto st_case_491 - case 492: - goto st_case_492 - case 121: - goto st_case_121 - case 122: - goto st_case_122 - case 123: - goto st_case_123 - case 124: - goto st_case_124 - case 493: - goto st_case_493 - case 494: - goto st_case_494 - case 495: - goto st_case_495 - case 125: - goto st_case_125 - case 496: - goto st_case_496 - case 497: - goto st_case_497 - case 498: - goto st_case_498 - case 499: - goto st_case_499 - case 500: - goto st_case_500 - case 501: - goto st_case_501 - case 502: - goto st_case_502 - case 503: - goto st_case_503 - case 504: - goto st_case_504 - case 505: - goto st_case_505 - case 506: - goto st_case_506 - case 507: - goto st_case_507 - case 508: - goto st_case_508 - case 509: - goto st_case_509 - case 510: - goto st_case_510 - case 511: - goto st_case_511 - case 512: - goto st_case_512 - case 513: - goto st_case_513 - case 514: - goto st_case_514 - case 515: - goto st_case_515 - case 126: - goto st_case_126 - case 127: - goto st_case_127 - case 516: - goto st_case_516 - case 517: - goto st_case_517 - case 518: - goto st_case_518 - case 519: - goto st_case_519 - case 520: - goto st_case_520 - case 521: - goto st_case_521 - case 522: - goto st_case_522 - case 523: - goto st_case_523 - case 524: - goto st_case_524 - case 128: - goto st_case_128 - case 129: - goto st_case_129 - case 130: - goto st_case_130 - case 525: - goto st_case_525 - case 131: - goto st_case_131 - case 132: - goto st_case_132 - case 133: - goto st_case_133 - case 526: - goto st_case_526 - case 134: - goto st_case_134 - case 135: - goto st_case_135 - case 527: - goto st_case_527 - case 528: - goto st_case_528 - case 136: - goto st_case_136 - case 137: - goto st_case_137 - case 138: - goto st_case_138 - case 529: - goto st_case_529 - case 530: - goto st_case_530 - case 139: - goto st_case_139 - case 531: - goto st_case_531 - case 140: - goto st_case_140 - case 532: - goto st_case_532 - case 533: - goto st_case_533 - case 534: - goto st_case_534 - case 535: - goto st_case_535 - case 536: - goto st_case_536 - case 537: - goto st_case_537 - case 538: - goto st_case_538 - case 539: - goto st_case_539 - case 141: - goto st_case_141 - case 142: - goto st_case_142 - case 143: - goto st_case_143 - case 540: - goto st_case_540 - case 144: - goto st_case_144 - case 145: - goto st_case_145 - case 146: - goto st_case_146 - case 541: - goto st_case_541 - case 147: - goto st_case_147 - case 148: - goto st_case_148 - case 542: - goto st_case_542 - case 543: - goto st_case_543 - case 544: - goto st_case_544 - case 545: - goto st_case_545 - case 546: - goto st_case_546 - case 547: - goto st_case_547 - case 548: - goto st_case_548 - case 549: - goto st_case_549 - case 550: - goto st_case_550 - case 551: - goto st_case_551 - case 552: - goto st_case_552 - case 553: - goto st_case_553 - case 554: - goto st_case_554 - case 555: - goto st_case_555 - case 556: - goto st_case_556 - case 557: - goto st_case_557 - case 
558: - goto st_case_558 - case 559: - goto st_case_559 - case 560: - goto st_case_560 - case 561: - goto st_case_561 - case 149: - goto st_case_149 - case 150: - goto st_case_150 - case 562: - goto st_case_562 - case 563: - goto st_case_563 - case 564: - goto st_case_564 - case 151: - goto st_case_151 - case 565: - goto st_case_565 - case 566: - goto st_case_566 - case 152: - goto st_case_152 - case 567: - goto st_case_567 - case 568: - goto st_case_568 - case 569: - goto st_case_569 - case 570: - goto st_case_570 - case 571: - goto st_case_571 - case 572: - goto st_case_572 - case 573: - goto st_case_573 - case 574: - goto st_case_574 - case 575: - goto st_case_575 - case 576: - goto st_case_576 - case 577: - goto st_case_577 - case 578: - goto st_case_578 - case 579: - goto st_case_579 - case 580: - goto st_case_580 - case 581: - goto st_case_581 - case 582: - goto st_case_582 - case 583: - goto st_case_583 - case 584: - goto st_case_584 - case 153: - goto st_case_153 - case 154: - goto st_case_154 - case 585: - goto st_case_585 - case 155: - goto st_case_155 - case 586: - goto st_case_586 - case 587: - goto st_case_587 - case 588: - goto st_case_588 - case 589: - goto st_case_589 - case 590: - goto st_case_590 - case 591: - goto st_case_591 - case 592: - goto st_case_592 - case 593: - goto st_case_593 - case 156: - goto st_case_156 - case 157: - goto st_case_157 - case 158: - goto st_case_158 - case 594: - goto st_case_594 - case 159: - goto st_case_159 - case 160: - goto st_case_160 - case 161: - goto st_case_161 - case 595: - goto st_case_595 - case 162: - goto st_case_162 - case 163: - goto st_case_163 - case 596: - goto st_case_596 - case 597: - goto st_case_597 - case 164: - goto st_case_164 - case 165: - goto st_case_165 - case 166: - goto st_case_166 - case 167: - goto st_case_167 - case 168: - goto st_case_168 - case 169: - goto st_case_169 - case 598: - goto st_case_598 - case 599: - goto st_case_599 - case 600: - goto st_case_600 - case 601: - goto st_case_601 - case 602: - goto st_case_602 - case 603: - goto st_case_603 - case 604: - goto st_case_604 - case 605: - goto st_case_605 - case 606: - goto st_case_606 - case 607: - goto st_case_607 - case 608: - goto st_case_608 - case 609: - goto st_case_609 - case 610: - goto st_case_610 - case 611: - goto st_case_611 - case 612: - goto st_case_612 - case 613: - goto st_case_613 - case 614: - goto st_case_614 - case 615: - goto st_case_615 - case 616: - goto st_case_616 - case 170: - goto st_case_170 - case 171: - goto st_case_171 - case 172: - goto st_case_172 - case 617: - goto st_case_617 - case 618: - goto st_case_618 - case 619: - goto st_case_619 - case 173: - goto st_case_173 - case 620: - goto st_case_620 - case 621: - goto st_case_621 - case 174: - goto st_case_174 - case 622: - goto st_case_622 - case 623: - goto st_case_623 - case 624: - goto st_case_624 - case 625: - goto st_case_625 - case 626: - goto st_case_626 - case 175: - goto st_case_175 - case 176: - goto st_case_176 - case 177: - goto st_case_177 - case 627: - goto st_case_627 - case 178: - goto st_case_178 - case 179: - goto st_case_179 - case 180: - goto st_case_180 - case 628: - goto st_case_628 - case 181: - goto st_case_181 - case 182: - goto st_case_182 - case 629: - goto st_case_629 - case 630: - goto st_case_630 - case 183: - goto st_case_183 - case 631: - goto st_case_631 - case 632: - goto st_case_632 - case 633: - goto st_case_633 - case 184: - goto st_case_184 - case 185: - goto st_case_185 - case 186: - goto st_case_186 - case 634: - goto 
st_case_634 - case 187: - goto st_case_187 - case 188: - goto st_case_188 - case 189: - goto st_case_189 - case 635: - goto st_case_635 - case 190: - goto st_case_190 - case 191: - goto st_case_191 - case 636: - goto st_case_636 - case 637: - goto st_case_637 - case 192: - goto st_case_192 - case 193: - goto st_case_193 - case 194: - goto st_case_194 - case 638: - goto st_case_638 - case 195: - goto st_case_195 - case 196: - goto st_case_196 - case 639: - goto st_case_639 - case 640: - goto st_case_640 - case 641: - goto st_case_641 - case 642: - goto st_case_642 - case 643: - goto st_case_643 - case 644: - goto st_case_644 - case 645: - goto st_case_645 - case 646: - goto st_case_646 - case 197: - goto st_case_197 - case 198: - goto st_case_198 - case 199: - goto st_case_199 - case 647: - goto st_case_647 - case 200: - goto st_case_200 - case 201: - goto st_case_201 - case 202: - goto st_case_202 - case 648: - goto st_case_648 - case 203: - goto st_case_203 - case 204: - goto st_case_204 - case 649: - goto st_case_649 - case 650: - goto st_case_650 - case 205: - goto st_case_205 - case 206: - goto st_case_206 - case 207: - goto st_case_207 - case 651: - goto st_case_651 - case 652: - goto st_case_652 - case 653: - goto st_case_653 - case 654: - goto st_case_654 - case 655: - goto st_case_655 - case 656: - goto st_case_656 - case 657: - goto st_case_657 - case 658: - goto st_case_658 - case 659: - goto st_case_659 - case 660: - goto st_case_660 - case 661: - goto st_case_661 - case 662: - goto st_case_662 - case 663: - goto st_case_663 - case 664: - goto st_case_664 - case 665: - goto st_case_665 - case 666: - goto st_case_666 - case 667: - goto st_case_667 - case 668: - goto st_case_668 - case 669: - goto st_case_669 - case 208: - goto st_case_208 - case 209: - goto st_case_209 - case 210: - goto st_case_210 - case 211: - goto st_case_211 - case 212: - goto st_case_212 - case 670: - goto st_case_670 - case 213: - goto st_case_213 - case 214: - goto st_case_214 - case 671: - goto st_case_671 - case 672: - goto st_case_672 - case 673: - goto st_case_673 - case 674: - goto st_case_674 - case 675: - goto st_case_675 - case 676: - goto st_case_676 - case 677: - goto st_case_677 - case 678: - goto st_case_678 - case 679: - goto st_case_679 - case 215: - goto st_case_215 - case 216: - goto st_case_216 - case 217: - goto st_case_217 - case 680: - goto st_case_680 - case 218: - goto st_case_218 - case 219: - goto st_case_219 - case 220: - goto st_case_220 - case 681: - goto st_case_681 - case 221: - goto st_case_221 - case 222: - goto st_case_222 - case 682: - goto st_case_682 - case 683: - goto st_case_683 - case 223: - goto st_case_223 - case 224: - goto st_case_224 - case 225: - goto st_case_225 - case 684: - goto st_case_684 - case 226: - goto st_case_226 - case 227: - goto st_case_227 - case 685: - goto st_case_685 - case 686: - goto st_case_686 - case 687: - goto st_case_687 - case 688: - goto st_case_688 - case 689: - goto st_case_689 - case 690: - goto st_case_690 - case 691: - goto st_case_691 - case 692: - goto st_case_692 - case 228: - goto st_case_228 - case 229: - goto st_case_229 - case 230: - goto st_case_230 - case 693: - goto st_case_693 - case 231: - goto st_case_231 - case 232: - goto st_case_232 - case 694: - goto st_case_694 - case 695: - goto st_case_695 - case 696: - goto st_case_696 - case 697: - goto st_case_697 - case 698: - goto st_case_698 - case 699: - goto st_case_699 - case 700: - goto st_case_700 - case 701: - goto st_case_701 - case 233: - goto st_case_233 - case 
234: - goto st_case_234 - case 235: - goto st_case_235 - case 702: - goto st_case_702 - case 236: - goto st_case_236 - case 237: - goto st_case_237 - case 238: - goto st_case_238 - case 703: - goto st_case_703 - case 239: - goto st_case_239 - case 240: - goto st_case_240 - case 704: - goto st_case_704 - case 705: - goto st_case_705 - case 241: - goto st_case_241 - case 242: - goto st_case_242 - case 243: - goto st_case_243 - case 706: - goto st_case_706 - case 707: - goto st_case_707 - case 708: - goto st_case_708 - case 709: - goto st_case_709 - case 710: - goto st_case_710 - case 711: - goto st_case_711 - case 712: - goto st_case_712 - case 713: - goto st_case_713 - case 714: - goto st_case_714 - case 715: - goto st_case_715 - case 716: - goto st_case_716 - case 717: - goto st_case_717 - case 718: - goto st_case_718 - case 719: - goto st_case_719 - case 720: - goto st_case_720 - case 721: - goto st_case_721 - case 722: - goto st_case_722 - case 723: - goto st_case_723 - case 724: - goto st_case_724 - case 244: - goto st_case_244 - case 245: - goto st_case_245 - case 725: - goto st_case_725 - case 246: - goto st_case_246 - case 247: - goto st_case_247 - case 726: - goto st_case_726 - case 727: - goto st_case_727 - case 728: - goto st_case_728 - case 729: - goto st_case_729 - case 730: - goto st_case_730 - case 731: - goto st_case_731 - case 732: - goto st_case_732 - case 733: - goto st_case_733 - case 248: - goto st_case_248 - case 249: - goto st_case_249 - case 250: - goto st_case_250 - case 734: - goto st_case_734 - case 251: - goto st_case_251 - case 252: - goto st_case_252 - case 253: - goto st_case_253 - case 735: - goto st_case_735 - case 254: - goto st_case_254 - case 255: - goto st_case_255 - case 736: - goto st_case_736 - case 737: - goto st_case_737 - case 256: - goto st_case_256 - case 257: - goto st_case_257 - case 738: - goto st_case_738 - case 260: - goto st_case_260 - case 740: - goto st_case_740 - case 741: - goto st_case_741 - case 261: - goto st_case_261 - case 262: - goto st_case_262 - case 263: - goto st_case_263 - case 264: - goto st_case_264 - case 742: - goto st_case_742 - case 265: - goto st_case_265 - case 743: - goto st_case_743 - case 266: - goto st_case_266 - case 267: - goto st_case_267 - case 268: - goto st_case_268 - case 739: - goto st_case_739 - case 258: - goto st_case_258 - case 259: - goto st_case_259 - } - goto st_out - st269: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof269 + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof } - st_case_269: - switch ( m.data)[( m.p)] { - case 10: - goto tr33 + _resume: + switch m.cs { + case 46: + goto st_case_46 + case 1: + goto st_case_1 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 0: + goto st_case_0 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 7: + goto st_case_7 + case 8: + goto st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 + case 61: + goto st_case_61 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: 
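
The regenerated dispatch table now tops out below state 90, where the removed one ran past 740; the machine evidently compiles to far fewer states. Each "case N: goto st_case_N" arm only re-enters the machine at the state saved in m.cs. A minimal sketch of that save-and-resume shape, reusing just the m.data, m.cs, m.p, and m.pe names visible in this hunk; the two toy states and their transitions are illustrative, not Telegraf's:

package main

import "fmt"

// toyMachine mimics the generated layout: cs holds the current state,
// p the read cursor, pe the end of input; every step dispatches on cs
// exactly like the "case N: goto st_case_N" table above.
type toyMachine struct {
	data  []byte
	cs    int // current state, survives across steps
	p, pe int // read cursor and end of input
}

func (m *toyMachine) exec() bool {
	for m.p < m.pe {
		switch m.cs {
		case 1: // toy state: expect a lowercase letter
			if m.data[m.p] < 'a' || m.data[m.p] > 'z' {
				return false
			}
			m.cs = 2
		case 2: // toy state: expect a digit
			if m.data[m.p] < '0' || m.data[m.p] > '9' {
				return false
			}
			m.cs = 1
		}
		m.p++
	}
	return true
}

func main() {
	data := []byte("a1b2")
	m := &toyMachine{data: data, cs: 1, pe: len(data)}
	fmt.Println(m.exec()) // true: alternating letter/digit input is accepted
}

The generated file gets the same effect with labels and gotos instead of a loop, which is why the hunk continues below with the body of each st_case_N block.
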
+ goto st_case_68 + case 69: + goto st_case_69 case 11: - goto tr457 + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 70: + goto st_case_70 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 + case 75: + goto st_case_75 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 80: + goto st_case_80 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 81: + goto st_case_81 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 84: + goto st_case_84 + case 37: + goto st_case_37 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 88: + goto st_case_88 + case 42: + goto st_case_42 + case 89: + goto st_case_89 + case 43: + goto st_case_43 + case 44: + goto st_case_44 + case 45: + goto st_case_45 + case 85: + goto st_case_85 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + } + goto st_out + st46: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof46 + } + st_case_46: + switch (m.data)[(m.p)] { + case 10: + goto tr31 case 13: - goto tr33 + goto tr31 case 32: - goto tr456 + goto tr80 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr458 + goto tr81 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr456 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr80 } - goto tr455 -tr31: + goto tr79 + tr29: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 -tr455: + goto st1 + tr79: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 + goto st1 st1: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof1 } st_case_1: -//line plugins/parsers/influx/machine.go:3208 - switch ( m.data)[( m.p)] { +//line plugins/parsers/influx/machine.go:590 + switch (m.data)[(m.p)] { case 10: goto tr2 - case 11: - goto tr3 case 13: goto tr2 case 32: goto tr1 case 44: - goto tr4 + goto tr3 case 92: - goto st94 + goto st8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr1 } goto st1 -tr1: - ( m.cs) = 2 + tr1: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr58: - ( m.cs) = 2 + goto _again + tr56: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( 
m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st2: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof2 } st_case_2: -//line plugins/parsers/influx/machine.go:3258 - switch ( m.data)[( m.p)] { +//line plugins/parsers/influx/machine.go:638 + switch (m.data)[(m.p)] { case 10: - goto tr8 - case 11: - goto tr9 + goto tr7 case 13: - goto tr8 + goto tr7 case 32: goto st2 case 44: - goto tr8 + goto tr7 case 61: - goto tr8 + goto tr7 case 92: - goto tr10 + goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st2 } - goto tr6 -tr6: + goto tr5 + tr5: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st3 + goto st3 st3: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof3 } st_case_3: -//line plugins/parsers/influx/machine.go:3290 - switch ( m.data)[( m.p)] { +//line plugins/parsers/influx/machine.go:668 + switch (m.data)[(m.p)] { case 32: - goto tr8 + goto tr7 case 44: - goto tr8 + goto tr7 case 61: - goto tr12 + goto tr10 case 92: - goto st34 + goto st12 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr7 } goto st3 -tr2: - ( m.cs) = 0 + tr2: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr8: - ( m.cs) = 0 + goto _again + tr7: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr33: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr37: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr41: - ( m.cs) = 0 + goto _again + tr31: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr45: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } + err = ErrNameParse + (m.p)-- -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr103: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + goto _again + tr35: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr130: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( 
m.cs) = 257; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + goto _again + tr82: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr196: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr421: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:32 + (m.cs) = 34 + { + (m.p)++ + goto _out + } - err = ErrNameParse - ( m.p)-- + goto _again + tr135: +//line plugins/parsers/influx/machine.go.rl:73 - ( m.cs) = 257; - {( m.p)++; goto _out } + (m.p)-- -//line plugins/parsers/influx/machine.go.rl:46 + { + goto st46 + } - err = ErrTagParse - ( m.p)-- + goto st0 +//line plugins/parsers/influx/machine.go:754 + st_case_0: + st0: + (m.cs) = 0 + goto _out + tr10: +//line plugins/parsers/influx/machine.go.rl:108 - ( m.cs) = 257; - {( m.p)++; goto _out } + m.key = m.text() -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr424: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr1053: -//line plugins/parsers/influx/machine.go.rl:73 - - ( m.p)-- - - {goto st269 } - - goto st0 -//line plugins/parsers/influx/machine.go:3511 -st_case_0: - st0: - ( m.cs) = 0 - goto _out -tr12: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st4 + goto st4 st4: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof4 } st_case_4: -//line plugins/parsers/influx/machine.go:3527 - switch ( m.data)[( m.p)] { +//line plugins/parsers/influx/machine.go:770 + switch (m.data)[(m.p)] { case 34: goto st5 case 45: - goto tr15 + goto tr13 case 46: - goto tr16 + goto tr14 case 48: - goto tr17 + goto tr15 case 70: - goto tr19 + goto tr17 case 84: - goto tr20 + goto tr18 case 102: - goto tr21 + goto tr19 case 116: - goto tr22 + goto tr20 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr18 + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr16 } - goto tr8 + goto tr7 st5: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof5 } st_case_5: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: - goto tr24 + goto tr22 case 34: - goto tr25 + goto tr23 case 92: - goto tr26 + goto tr24 } - goto tr23 -tr23: + goto tr21 + tr21: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st6 -tr24: + goto st6 + tr22: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 -tr28: + goto st6 + tr26: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + 
m.sol++ // next char will be the first column in the line - goto st6 + goto st6 st6: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof6 } st_case_6: -//line plugins/parsers/influx/machine.go:3595 - switch ( m.data)[( m.p)] { +//line plugins/parsers/influx/machine.go:838 + switch (m.data)[(m.p)] { case 10: - goto tr28 + goto tr26 case 34: - goto tr29 + goto tr27 case 92: - goto st73 + goto st13 } goto st6 -tr25: - ( m.cs) = 270 + tr23: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr29: - ( m.cs) = 270 + goto _again + tr27: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again - st270: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof270 + goto _again + st47: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof47 } - st_case_270: -//line plugins/parsers/influx/machine.go:3640 - switch ( m.data)[( m.p)] { + st_case_47: +//line plugins/parsers/influx/machine.go:883 + switch (m.data)[(m.p)] { case 10: - goto tr101 + goto tr34 case 13: - goto st32 + goto st9 case 32: - goto st271 + goto st48 case 44: - goto st35 + goto st11 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto st48 } - goto tr103 -tr921: - ( m.cs) = 271 + goto tr82 + tr110: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr1041: - ( m.cs) = 271 + goto _again + tr117: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr1044: - ( m.cs) = 271 + goto _again + tr122: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr1047: - ( m.cs) = 271 + goto _again + tr127: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again - st271: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof271 + goto _again + st48: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof48 } - st_case_271: -//line plugins/parsers/influx/machine.go:3712 - switch ( m.data)[( m.p)] { + st_case_48: +//line 
plugins/parsers/influx/machine.go:955 + switch (m.data)[(m.p)] { case 10: - goto tr101 + goto tr34 case 13: - goto st32 + goto st9 case 32: - goto st271 + goto st48 case 45: - goto tr462 + goto tr86 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr463 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr87 } - case ( m.data)[( m.p)] >= 9: - goto st271 + case (m.data)[(m.p)] >= 9: + goto st48 } - goto tr424 -tr101: + goto tr35 + tr34: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st272 -tr468: - ( m.cs) = 272 + goto st49 + tr89: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr730: - ( m.cs) = 272 + goto _again + tr111: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr942: - ( m.cs) = 272 + goto _again + tr118: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr948: - ( m.cs) = 272 + goto _again + tr123: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr954: - ( m.cs) = 272 + goto _again + tr128: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first 
column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again - st272: + goto _again + st49: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof272 + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof49 } - st_case_272: -//line plugins/parsers/influx/machine.go:3846 - switch ( m.data)[( m.p)] { + st_case_49: +//line plugins/parsers/influx/machine.go:1089 + switch (m.data)[(m.p)] { case 10: - goto tr33 - case 11: - goto tr34 + goto tr31 case 13: - goto tr33 + goto tr31 case 32: goto st7 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr35 + goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } - goto tr31 -tr456: + goto tr29 + tr80: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true - goto st7 + goto st7 st7: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof7 } st_case_7: -//line plugins/parsers/influx/machine.go:3878 - switch ( m.data)[( m.p)] { +//line plugins/parsers/influx/machine.go:1119 + switch (m.data)[(m.p)] { case 10: - goto tr33 - case 11: - goto tr34 + goto tr31 case 13: - goto tr33 + goto tr31 case 32: goto st7 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr35 + goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } - goto tr31 -tr34: + goto tr29 + tr32: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 -tr457: + goto st8 + tr81: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 + goto st8 st8: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof8 } st_case_8: -//line plugins/parsers/influx/machine.go:3920 - switch ( m.data)[( m.p)] { - case 10: - goto tr37 - case 11: - goto tr38 - case 13: - goto tr37 - case 32: - goto tr36 - case 35: - goto st1 - case 44: - goto tr4 - case 92: - goto tr35 +//line plugins/parsers/influx/machine.go:1159 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto st0 + } + goto st1 + tr90: + (m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr36 + + goto _again + tr112: + (m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } } - goto tr31 -tr36: - ( m.cs) = 9 -//line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + goto _again + tr119: + (m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:112 - ( m.cs) = 257; - {( m.p)++; goto _out } - } + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } + + goto _again + tr124: + (m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto 
_out + } + } + + goto _again + tr129: + (m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st9: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof9 } st_case_9: -//line plugins/parsers/influx/machine.go:3959 - switch ( m.data)[( m.p)] { - case 10: - goto tr41 - case 11: - goto tr42 - case 13: - goto tr41 - case 32: - goto st9 - case 35: - goto tr6 - case 44: - goto tr41 - case 61: - goto tr31 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st9 +//line plugins/parsers/influx/machine.go:1234 + if (m.data)[(m.p)] == 10 { + goto tr34 } - goto tr39 -tr39: + goto st0 + tr86: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st10 + goto st10 st10: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof10 } st_case_10: -//line plugins/parsers/influx/machine.go:3993 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr46 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 +//line plugins/parsers/influx/machine.go:1250 + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st50 } - goto st10 -tr46: - ( m.cs) = 11 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr49: - ( m.cs) = 11 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - + goto tr35 + tr87: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto _again - st11: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof11 + goto st50 + st50: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof50 } - st_case_11: -//line plugins/parsers/influx/machine.go:4049 - switch ( m.data)[( m.p)] { + st_case_50: +//line plugins/parsers/influx/machine.go:1266 + switch (m.data)[(m.p)] { case 10: - goto tr45 - case 11: - goto tr49 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto tr43 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 + switch { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st52 + } + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr39 -tr4: - ( m.cs) = 12 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr60: - ( m.cs) = 12 -//line plugins/parsers/influx/machine.go.rl:99 + goto tr35 + tr88: + (m.cs) = 51 +//line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again - st12: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof12 + goto _again + st51: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof51 } - st_case_12: -//line plugins/parsers/influx/machine.go:4101 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 
44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr51 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto tr50 -tr50: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st13 - st13: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof13 - } - st_case_13: -//line plugins/parsers/influx/machine.go:4132 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st13 -tr53: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - - goto st14 - st14: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof14 - } - st_case_14: -//line plugins/parsers/influx/machine.go:4163 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr56 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto tr55 -tr55: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st15 - st15: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof15 - } - st_case_15: -//line plugins/parsers/influx/machine.go:4194 - switch ( m.data)[( m.p)] { - case 10: - goto tr2 - case 11: - goto tr59 - case 13: - goto tr2 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr2 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 -tr59: - ( m.cs) = 16 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st16: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof16 - } - st_case_16: -//line plugins/parsers/influx/machine.go:4233 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr63 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto tr64 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto tr62 -tr62: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st17 - st17: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof17 - } - st_case_17: -//line plugins/parsers/influx/machine.go:4265 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr66 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st17 -tr66: - ( m.cs) = 18 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr63: - ( m.cs) = 18 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st18: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof18 - } - st_case_18: -//line plugins/parsers/influx/machine.go:4321 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr63 - case 13: - goto tr45 - 
case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto tr64 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto tr62 -tr64: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st19 - st19: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof19 - } - st_case_19: -//line plugins/parsers/influx/machine.go:4353 - if ( m.data)[( m.p)] == 92 { - goto st20 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st17 - st20: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof20 - } - st_case_20: -//line plugins/parsers/influx/machine.go:4374 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr66 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st17 -tr56: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st21 - st21: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof21 - } - st_case_21: -//line plugins/parsers/influx/machine.go:4406 - if ( m.data)[( m.p)] == 92 { - goto st22 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st15 - st22: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof22 - } - st_case_22: -//line plugins/parsers/influx/machine.go:4427 - switch ( m.data)[( m.p)] { - case 10: - goto tr2 - case 11: - goto tr59 - case 13: - goto tr2 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr2 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 -tr51: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st23 - st23: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof23 - } - st_case_23: -//line plugins/parsers/influx/machine.go:4459 - if ( m.data)[( m.p)] == 92 { - goto st24 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st13 - st24: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof24 - } - st_case_24: -//line plugins/parsers/influx/machine.go:4480 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st13 -tr47: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st25 -tr423: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st25 - st25: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof25 - } - st_case_25: -//line plugins/parsers/influx/machine.go:4521 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 34: - goto st28 - case 44: - goto tr4 - case 45: - goto tr72 - case 46: - goto tr73 - case 48: - goto tr74 - case 70: - goto tr76 - case 84: - goto tr77 - case 
92: - goto st94 - case 102: - goto tr78 - case 116: - goto tr79 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr75 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 -tr3: - ( m.cs) = 26 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st26: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof26 - } - st_case_26: -//line plugins/parsers/influx/machine.go:4579 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr49 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto st1 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto tr39 -tr43: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st27 - st27: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof27 - } - st_case_27: -//line plugins/parsers/influx/machine.go:4611 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st10 - st28: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof28 - } - st_case_28: - switch ( m.data)[( m.p)] { - case 10: - goto tr24 - case 11: - goto tr82 - case 13: - goto tr23 - case 32: - goto tr81 - case 34: - goto tr83 - case 44: - goto tr84 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr81 - } - goto tr80 -tr80: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st29 - st29: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof29 - } - st_case_29: -//line plugins/parsers/influx/machine.go:4657 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 -tr87: - ( m.cs) = 30 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr81: - ( m.cs) = 30 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr229: - ( m.cs) = 30 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st30: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof30 - } - st_case_30: -//line plugins/parsers/influx/machine.go:4726 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr94 - case 13: - goto st6 - case 32: - goto st30 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st30 - } - goto tr92 -tr92: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st31 - st31: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof31 - } - st_case_31: -//line plugins/parsers/influx/machine.go:4760 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - 
case 61: - goto tr99 - case 92: - goto st75 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st31 -tr95: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr98: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr384: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st273: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof273 - } - st_case_273: -//line plugins/parsers/influx/machine.go:4833 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st274 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - case 61: - goto tr12 - case 92: - goto st34 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 - } - goto st3 - st274: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof274 - } - st_case_274: - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st274 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto tr103 - case 45: - goto tr465 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr466 - } - case ( m.data)[( m.p)] >= 9: - goto st271 - } - goto st3 -tr470: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr732: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr944: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr950: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr956: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st32: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof32 - } - st_case_32: -//line plugins/parsers/influx/machine.go:4956 - if ( m.data)[( m.p)] == 10 { - goto tr101 - } - goto st0 -tr465: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st33 - st33: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof33 - } - st_case_33: -//line plugins/parsers/influx/machine.go:4972 - switch ( m.data)[( m.p)] { - case 32: - goto tr103 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr103 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { - goto st275 - } - default: - goto tr103 - } - goto st3 -tr466: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st275 - st275: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof275 - } - st_case_275: -//line plugins/parsers/influx/machine.go:5007 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st278 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 -tr467: - ( m.cs) = 276 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st276: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof276 - } - st_case_276: -//line plugins/parsers/influx/machine.go:5051 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 13: - goto st32 - case 32: - goto st276 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 - } - goto st0 -tr469: - ( m.cs) = 277 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st277: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof277 - } - st_case_277: -//line plugins/parsers/influx/machine.go:5082 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st277 - case 13: - goto st32 - case 32: - goto st276 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto st34 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 - } - goto st3 -tr10: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st34 - st34: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof34 - } - st_case_34: -//line plugins/parsers/influx/machine.go:5114 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st278: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof278 - } - st_case_278: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st279 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st279: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof279 - } - st_case_279: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st280 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st280: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof280 - } - st_case_280: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st281 - } - case ( m.data)[( m.p)] >= 9: - goto 
tr467 - } - goto st3 - st281: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof281 - } - st_case_281: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st282 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st282: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof282 - } - st_case_282: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st283 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st283: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof283 - } - st_case_283: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st284 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st284: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof284 - } - st_case_284: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st285 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st285: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof285 - } - st_case_285: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st286 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st286: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof286 - } - st_case_286: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st287 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st287: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof287 - } - st_case_287: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st288 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st288: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof288 - } - st_case_288: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st289 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st289: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof289 - } - st_case_289: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st290: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof290 - } - st_case_290: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st291 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st291: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof291 - } - st_case_291: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st292: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof292 - } - st_case_292: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st293 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st293: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof293 - } - st_case_293: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st294 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st294: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof294 - } - st_case_294: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st295 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st3 - st295: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof295 - } - st_case_295: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 - } - goto st3 -tr922: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1042: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:112 - - err = 
m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1045: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1048: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st35: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof35 - } - st_case_35: -//line plugins/parsers/influx/machine.go:5716 - switch ( m.data)[( m.p)] { - case 32: - goto tr8 - case 44: - goto tr8 - case 61: - goto tr8 - case 92: - goto tr10 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto tr6 -tr99: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st36 - st36: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof36 - } - st_case_36: -//line plugins/parsers/influx/machine.go:5747 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr105 - case 45: - goto tr106 - case 46: - goto tr107 - case 48: - goto tr108 - case 70: - goto tr110 - case 84: - goto tr111 - case 92: - goto st73 - case 102: - goto tr112 - case 116: - goto tr113 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr109 - } - goto st6 -tr105: - ( m.cs) = 296 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st296: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof296 - } - st_case_296: -//line plugins/parsers/influx/machine.go:5792 - switch ( m.data)[( m.p)] { - case 10: - goto tr492 - case 13: - goto tr493 - case 32: - goto tr491 - case 34: - goto tr25 - case 44: - goto tr494 - case 92: - goto tr26 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr491 - } - goto tr23 -tr491: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st297 -tr980: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr985: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr988: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr991: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st297: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof297 - } - st_case_297: -//line plugins/parsers/influx/machine.go:5874 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 13: - goto st72 - case 32: - goto st297 - case 34: - goto tr29 - case 45: - goto tr497 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr498 - } - case ( m.data)[( m.p)] >= 9: - goto st297 - } - 
goto st6 -tr492: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st298 -tr219: -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st298 -tr636: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr600: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr817: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr822: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr803: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr758: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr791: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr797: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st298: -//line plugins/parsers/influx/machine.go.rl:172 - - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof298 - } - st_case_298: -//line plugins/parsers/influx/machine.go:6081 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr115 - case 13: - goto st6 - 
case 32: - goto st37 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st37 - } - goto tr80 - st37: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof37 - } - st_case_37: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr115 - case 13: - goto st6 - case 32: - goto st37 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st37 - } - goto tr80 -tr115: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st38 - st38: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof38 - } - st_case_38: -//line plugins/parsers/influx/machine.go:6142 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr118 - case 13: - goto st6 - case 32: - goto tr117 - case 34: - goto tr83 - case 35: - goto st29 - case 44: - goto tr90 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr117 - } - goto tr80 -tr117: - ( m.cs) = 39 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st39: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof39 - } - st_case_39: -//line plugins/parsers/influx/machine.go:6183 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr121 - case 13: - goto st6 - case 32: - goto st39 - case 34: - goto tr122 - case 35: - goto tr92 - case 44: - goto st6 - case 61: - goto tr80 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st39 - } - goto tr119 -tr119: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st40 - st40: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof40 - } - st_case_40: -//line plugins/parsers/influx/machine.go:6219 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr125 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st40 -tr125: - ( m.cs) = 41 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr129: - ( m.cs) = 41 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st41: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof41 - } - st_case_41: -//line plugins/parsers/influx/machine.go:6277 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr129 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto tr119 -tr122: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr126: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:148 - - err = 
m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st299: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof299 - } - st_case_299: -//line plugins/parsers/influx/machine.go:6335 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr500 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr501 - case 61: - goto tr47 - case 92: - goto st27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr499 - } - goto st10 -tr499: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr563: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr811: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr729: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr941: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr947: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr953: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1005: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1009: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if 
err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1013: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st300: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof300 - } - st_case_300: -//line plugins/parsers/influx/machine.go:6571 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr503 - case 13: - goto st32 - case 32: - goto st300 - case 44: - goto tr103 - case 45: - goto tr465 - case 61: - goto tr103 - case 92: - goto tr10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr466 - } - case ( m.data)[( m.p)] >= 9: - goto st300 - } - goto tr6 -tr503: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st301 - st301: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof301 - } - st_case_301: -//line plugins/parsers/influx/machine.go:6610 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr503 - case 13: - goto st32 - case 32: - goto st300 - case 44: - goto tr103 - case 45: - goto tr465 - case 61: - goto tr12 - case 92: - goto tr10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr466 - } - case ( m.data)[( m.p)] >= 9: - goto st300 - } - goto tr6 -tr500: - ( m.cs) = 302 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr504: - ( m.cs) = 302 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st302: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof302 - } - st_case_302: -//line plugins/parsers/influx/machine.go:6673 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr504 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr4 - case 45: - goto tr505 - case 61: - goto tr47 - case 92: - goto tr43 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr506 - } - case ( m.data)[( m.p)] >= 9: - goto tr499 - } - goto tr39 -tr505: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st42 - st42: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof42 - } - st_case_42: -//line plugins/parsers/influx/machine.go:6712 - switch ( m.data)[( m.p)] { - case 10: - goto tr130 - case 11: - goto tr46 - case 13: - goto tr130 - case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st303 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st10 -tr506: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st303 - st303: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof303 - } - st_case_303: -//line plugins/parsers/influx/machine.go:6749 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - 
case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st307 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 -tr512: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr572: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr507: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr569: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st304: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof304 - } - st_case_304: -//line plugins/parsers/influx/machine.go:6852 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr511 - case 13: - goto st32 - case 32: - goto st304 - case 44: - goto tr8 - case 61: - goto tr8 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st304 - } - goto tr6 -tr511: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st305 - st305: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof305 - } - st_case_305: -//line plugins/parsers/influx/machine.go:6884 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr511 - case 13: - goto st32 - case 32: - goto st304 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st304 - } - goto tr6 -tr513: - ( m.cs) = 306 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr508: - ( m.cs) = 306 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st306: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof306 - } - st_case_306: -//line plugins/parsers/influx/machine.go:6950 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr513 - case 13: - goto st32 - case 32: - goto tr512 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr512 - } - goto tr39 - st307: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof307 - } - st_case_307: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - 
goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st308 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st308: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof308 - } - st_case_308: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st309 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st309: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof309 - } - st_case_309: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st310 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st310: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof310 - } - st_case_310: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st311 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st311: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof311 - } - st_case_311: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st312 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st312: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof312 - } - st_case_312: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st313 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st313: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof313 - } - st_case_313: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st314 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st314: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof314 - } - st_case_314: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st315 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st315: - if ( m.p)++; ( m.p) == ( m.pe) { - 
goto _test_eof315 - } - st_case_315: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st316 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st316: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof316 - } - st_case_316: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st317 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st317: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof317 - } - st_case_317: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st318 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st318: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof318 - } - st_case_318: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st319 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st319: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof319 - } - st_case_319: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st320 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st320: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof320 - } - st_case_320: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st321 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st321: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof321 - } - st_case_321: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st322 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st322: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof322 - } - st_case_322: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st323 - } - 
case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st323: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof323 - } - st_case_323: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st324 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st324: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof324 - } - st_case_324: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr507 - } - goto st10 -tr501: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr565: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr813: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr733: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr945: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr951: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr957: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1007: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; 
goto _out } - } - - goto _again -tr1011: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1015: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st43: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof43 - } - st_case_43: -//line plugins/parsers/influx/machine.go:7721 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 44: - goto tr45 - case 61: - goto tr45 - case 92: - goto tr133 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto tr132 -tr132: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st44 - st44: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof44 - } - st_case_44: -//line plugins/parsers/influx/machine.go:7752 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 44: - goto tr45 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 -tr135: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st45 - st45: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof45 - } - st_case_45: -//line plugins/parsers/influx/machine.go:7787 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 34: - goto tr137 - case 44: - goto tr45 - case 45: - goto tr138 - case 46: - goto tr139 - case 48: - goto tr140 - case 61: - goto tr45 - case 70: - goto tr142 - case 84: - goto tr143 - case 92: - goto tr56 - case 102: - goto tr144 - case 116: - goto tr145 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr45 - } - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr141 - } - default: - goto tr45 - } - goto tr55 -tr137: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st46 - st46: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof46 - } - st_case_46: -//line plugins/parsers/influx/machine.go:7838 - switch ( m.data)[( m.p)] { - case 10: - goto tr24 - case 11: - goto tr148 - case 13: - goto tr23 - case 32: - goto tr147 - case 34: - goto tr149 - case 44: - goto tr150 - case 61: - goto tr23 - case 92: - goto tr151 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr147 - } - goto tr146 -tr146: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st47 - st47: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof47 - } - st_case_47: -//line plugins/parsers/influx/machine.go:7872 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto 
st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr178: - ( m.cs) = 48 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr153: - ( m.cs) = 48 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr147: - ( m.cs) = 48 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st48: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof48 - } - st_case_48: -//line plugins/parsers/influx/machine.go:7943 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr160 - case 13: - goto st6 - case 32: - goto st48 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st48 - } - goto tr158 -tr158: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st49 - st49: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof49 - } - st_case_49: -//line plugins/parsers/influx/machine.go:7977 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st49 -tr163: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st50 - st50: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof50 - } - st_case_50: -//line plugins/parsers/influx/machine.go:8009 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr105 - case 45: - goto tr165 - case 46: - goto tr166 - case 48: - goto tr167 - case 70: - goto tr169 - case 84: - goto tr170 - case 92: - goto st73 - case 102: - goto tr171 - case 116: - goto tr172 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr168 - } - goto st6 -tr165: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st51 - st51: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof51 - } - st_case_51: -//line plugins/parsers/influx/machine.go:8047 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 46: - goto st52 - case 48: - goto st631 - case 92: - goto st73 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st632 - } - goto st6 -tr166: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st52 - st52: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof52 - } - st_case_52: -//line plugins/parsers/influx/machine.go:8075 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - goto st6 - st325: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof325 - } - st_case_325: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - 
case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 -tr916: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st326 -tr531: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr923: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr925: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr928: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st326: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof326 - } - st_case_326: -//line plugins/parsers/influx/machine.go:8183 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 13: - goto st102 - case 32: - goto st326 - case 34: - goto tr29 - case 45: - goto tr538 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr539 - } - case ( m.data)[( m.p)] >= 9: - goto st326 - } - goto st6 -tr665: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st327 -tr273: -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st327 -tr532: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr674: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr737: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr743: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr749: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( 
m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr891: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again - st327: -//line plugins/parsers/influx/machine.go.rl:172 - - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof327 - } - st_case_327: -//line plugins/parsers/influx/machine.go:8352 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr337 - case 13: - goto st6 - case 32: - goto st164 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr338 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st164 - } - goto tr335 -tr335: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st53 - st53: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof53 - } - st_case_53: -//line plugins/parsers/influx/machine.go:8386 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 -tr179: - ( m.cs) = 54 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st54: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof54 - } - st_case_54: -//line plugins/parsers/influx/machine.go:8425 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr183 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto st53 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto tr182 -tr182: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st55 - st55: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof55 - } - st_case_55: -//line plugins/parsers/influx/machine.go:8459 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr186 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st55 -tr186: - ( m.cs) = 56 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr183: - ( m.cs) = 56 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st56: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof56 - } - st_case_56: -//line plugins/parsers/influx/machine.go:8517 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr183 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto tr182 -tr180: - ( 
m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr156: - ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr150: - ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st57: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof57 - } - st_case_57: -//line plugins/parsers/influx/machine.go:8588 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr190 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr191 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr189 -tr189: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st58 - st58: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof58 - } - st_case_58: -//line plugins/parsers/influx/machine.go:8620 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr194 - case 92: - goto st69 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st58 -tr190: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr193: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st328: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof328 - } - st_case_328: -//line plugins/parsers/influx/machine.go:8676 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st329 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - case 61: - goto tr53 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 - } - goto st13 - st329: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof329 - } - st_case_329: - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st329 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto tr196 - case 45: - goto tr541 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr542 - } - case ( m.data)[( m.p)] >= 9: - goto st271 - } - goto st13 -tr541: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st59 - st59: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof59 - } - st_case_59: -//line plugins/parsers/influx/machine.go:8740 - switch ( m.data)[( m.p)] { - case 32: - goto tr196 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr196 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st330 - } - default: - goto tr196 - } - goto 
st13 -tr542: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st330 - st330: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof330 - } - st_case_330: -//line plugins/parsers/influx/machine.go:8775 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st332 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 -tr543: - ( m.cs) = 331 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st331: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof331 - } - st_case_331: -//line plugins/parsers/influx/machine.go:8819 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st331 - case 13: - goto st32 - case 32: - goto st276 - case 44: - goto tr2 - case 61: - goto tr53 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 - } - goto st13 - st332: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof332 - } - st_case_332: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st333 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st333: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof333 - } - st_case_333: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st334 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st334: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof334 - } - st_case_334: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st335 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st335: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof335 - } - st_case_335: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st336 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st336: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof336 - } - st_case_336: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st337 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st337: - if ( m.p)++; ( m.p) == ( m.pe) 
{ - goto _test_eof337 - } - st_case_337: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st338 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st338: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof338 - } - st_case_338: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st339 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st339: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof339 - } - st_case_339: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st340 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st340: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof340 - } - st_case_340: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st341 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st341: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof341 - } - st_case_341: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st342 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st342: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof342 - } - st_case_342: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st343 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st343: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof343 - } - st_case_343: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st344 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st344: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof344 - } - st_case_344: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - 
goto st345 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st345: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof345 - } - st_case_345: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st346 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st346: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof346 - } - st_case_346: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st347 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st347: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof347 - } - st_case_347: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st348 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st348: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof348 - } - st_case_348: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st349 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st349: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof349 - } - st_case_349: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 - } - goto st13 -tr194: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - - goto st60 - st60: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof60 - } - st_case_60: -//line plugins/parsers/influx/machine.go:9386 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr149 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr151 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr146 -tr149: - ( m.cs) = 350 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr155: - ( m.cs) = 350 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st350: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof350 - } - st_case_350: -//line plugins/parsers/influx/machine.go:9442 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr564 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr565 - case 61: 
- goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr563 - } - goto st15 -tr564: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr731: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr943: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr949: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr955: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st351: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof351 - } - st_case_351: -//line plugins/parsers/influx/machine.go:9573 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr566 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr60 - case 45: - goto tr567 - case 61: - goto tr130 - case 92: - goto tr64 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr568 - } - case ( m.data)[( m.p)] >= 9: - goto tr563 - } - goto tr62 -tr591: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr566: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st352: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof352 - } - st_case_352: -//line plugins/parsers/influx/machine.go:9636 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr566 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr60 - case 45: - goto tr567 - case 61: - goto tr12 - case 92: - goto tr64 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr568 - } - case ( m.data)[( m.p)] >= 9: - goto tr563 - } - goto tr62 -tr567: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st61 - st61: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof61 - } 
- st_case_61: -//line plugins/parsers/influx/machine.go:9675 - switch ( m.data)[( m.p)] { - case 10: - goto tr130 - case 11: - goto tr66 - case 13: - goto tr130 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st353 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st17 -tr568: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st353 - st353: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof353 - } - st_case_353: -//line plugins/parsers/influx/machine.go:9712 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st355 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 -tr573: - ( m.cs) = 354 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr570: - ( m.cs) = 354 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st354: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof354 - } - st_case_354: -//line plugins/parsers/influx/machine.go:9783 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr573 - case 13: - goto st32 - case 32: - goto tr572 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto tr64 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr572 - } - goto tr62 - st355: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof355 - } - st_case_355: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st356 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st356: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof356 - } - st_case_356: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st357 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st357: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof357 - } - st_case_357: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st358 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st358: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof358 - } - st_case_358: - 
switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st359 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st359: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof359 - } - st_case_359: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st360 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st360: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof360 - } - st_case_360: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st361 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st361: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof361 - } - st_case_361: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st362 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st362: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof362 - } - st_case_362: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st363 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st363: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof363 - } - st_case_363: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st364 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st364: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof364 - } - st_case_364: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st365 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st365: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof365 - } - st_case_365: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st366 - } - case ( m.data)[( m.p)] >= 9: - 
goto tr569 - } - goto st17 - st366: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof366 - } - st_case_366: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st367 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st367: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof367 - } - st_case_367: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st368 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st368: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof368 - } - st_case_368: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st369 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st369: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof369 - } - st_case_369: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st370 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st370: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof370 - } - st_case_370: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st371 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st371: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof371 - } - st_case_371: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st372 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st372: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof372 - } - st_case_372: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr569 - } - goto st17 -tr151: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st62 - st62: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof62 - } - st_case_62: -//line plugins/parsers/influx/machine.go:10350 - switch ( m.data)[( m.p)] { - case 34: - goto st47 - case 92: - goto st63 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - 
case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st15 - st63: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof63 - } - st_case_63: -//line plugins/parsers/influx/machine.go:10374 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr154: - ( m.cs) = 64 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr148: - ( m.cs) = 64 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st64: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof64 - } - st_case_64: -//line plugins/parsers/influx/machine.go:10432 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr201 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr202 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto tr203 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto tr200 -tr200: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st65 - st65: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof65 - } - st_case_65: -//line plugins/parsers/influx/machine.go:10466 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr205 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st65 -tr205: - ( m.cs) = 66 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr201: - ( m.cs) = 66 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st66: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof66 - } - st_case_66: -//line plugins/parsers/influx/machine.go:10524 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr201 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr202 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto tr203 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto tr200 -tr202: - ( m.cs) = 373 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr206: - ( m.cs) = 373 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st373: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof373 - } - st_case_373: -//line plugins/parsers/influx/machine.go:10582 - switch ( m.data)[( m.p)] 
{ - case 10: - goto tr101 - case 11: - goto tr591 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr565 - case 61: - goto tr12 - case 92: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr563 - } - goto st17 -tr203: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st67 - st67: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof67 - } - st_case_67: -//line plugins/parsers/influx/machine.go:10614 - switch ( m.data)[( m.p)] { - case 34: - goto st65 - case 92: - goto st68 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st17 - st68: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof68 - } - st_case_68: -//line plugins/parsers/influx/machine.go:10638 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr205 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st65 -tr191: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st69 - st69: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof69 - } - st_case_69: -//line plugins/parsers/influx/machine.go:10672 - switch ( m.data)[( m.p)] { - case 34: - goto st58 - case 92: - goto st70 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st13 - st70: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof70 - } - st_case_70: -//line plugins/parsers/influx/machine.go:10696 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr194 - case 92: - goto st69 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st58 -tr187: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st71 -tr344: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st71 - st71: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof71 - } - st_case_71: -//line plugins/parsers/influx/machine.go:10738 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr210 - case 44: - goto tr180 - case 45: - goto tr211 - case 46: - goto tr212 - case 48: - goto tr213 - case 70: - goto tr215 - case 84: - goto tr216 - case 92: - goto st155 - case 102: - goto tr217 - case 116: - goto tr218 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr214 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 -tr210: - ( m.cs) = 374 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st374: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof374 - } - st_case_374: -//line plugins/parsers/influx/machine.go:10796 - switch ( m.data)[( m.p)] { - case 10: - goto tr492 - case 11: - goto tr593 - case 13: - goto tr493 - case 32: - goto tr592 - case 
34: - goto tr83 - case 44: - goto tr594 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr592 - } - goto tr80 -tr623: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr592: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr762: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr635: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr757: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr790: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr796: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr802: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr816: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr821: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr826: - ( m.cs) = 375 
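// (editorial note, not in the original diff) tr623, tr592, tr762, tr635,
// tr757, tr790, tr796, tr802, tr816, tr821, and tr826 all land in state 375
// but execute different stacks of actions first (SetMeasurement, AddTag, then
// AddFloat/AddInt/AddUint/AddBool as appropriate). Ragel emits one "tr" label
// per distinct (action list, target state) pair, which is why the generated
// file repeats these near-identical blocks so heavily.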
-//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st375: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof375 - } - st_case_375: -//line plugins/parsers/influx/machine.go:11049 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr596 - case 13: - goto st72 - case 32: - goto st375 - case 34: - goto tr95 - case 44: - goto st6 - case 45: - goto tr597 - case 61: - goto st6 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr598 - } - case ( m.data)[( m.p)] >= 9: - goto st375 - } - goto tr92 -tr596: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st376 - st376: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof376 - } - st_case_376: -//line plugins/parsers/influx/machine.go:11090 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr596 - case 13: - goto st72 - case 32: - goto st375 - case 34: - goto tr95 - case 44: - goto st6 - case 45: - goto tr597 - case 61: - goto tr99 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr598 - } - case ( m.data)[( m.p)] >= 9: - goto st375 - } - goto tr92 -tr493: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st72 -tr602: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr638: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr793: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr799: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr805: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st72: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof72 - } - st_case_72: -//line plugins/parsers/influx/machine.go:11196 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 34: - goto tr29 - case 92: - goto st73 - } - goto st6 -tr26: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st73 - st73: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof73 - } - st_case_73: -//line plugins/parsers/influx/machine.go:11217 - switch ( m.data)[( m.p)] { - case 34: - goto st6 - case 92: - goto st6 - } - goto tr8 -tr597: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st74 - st74: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof74 - } - st_case_74: -//line plugins/parsers/influx/machine.go:11236 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - 
case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st377 - } - case ( m.data)[( m.p)] >= 12: - goto st6 - } - goto st31 -tr598: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st377 - st377: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof377 - } - st_case_377: -//line plugins/parsers/influx/machine.go:11273 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st380 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 -tr599: - ( m.cs) = 378 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st378: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof378 - } - st_case_378: -//line plugins/parsers/influx/machine.go:11319 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 13: - goto st72 - case 32: - goto st378 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st378 - } - goto st6 -tr601: - ( m.cs) = 379 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st379: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof379 - } - st_case_379: -//line plugins/parsers/influx/machine.go:11354 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto st379 - case 13: - goto st72 - case 32: - goto st378 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st378 - } - goto st31 -tr96: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st75 - st75: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof75 - } - st_case_75: -//line plugins/parsers/influx/machine.go:11388 - switch ( m.data)[( m.p)] { - case 34: - goto st31 - case 92: - goto st31 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st380: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof380 - } - st_case_380: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st381 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st381: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof381 - } - st_case_381: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st382 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st382: - if ( m.p)++; ( m.p) == ( m.pe) { - goto 
_test_eof382 - } - st_case_382: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st383 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st383: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof383 - } - st_case_383: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st384 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st384: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof384 - } - st_case_384: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st385 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st385: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof385 - } - st_case_385: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st386 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st386: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof386 - } - st_case_386: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st387 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st387: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof387 - } - st_case_387: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st388 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st388: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof388 - } - st_case_388: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st389 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st389: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof389 - } - st_case_389: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - 
goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st390: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof390 - } - st_case_390: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st391 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st391: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof391 - } - st_case_391: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st392: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof392 - } - st_case_392: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st393 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st393: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof393 - } - st_case_393: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st394: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof394 - } - st_case_394: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st395 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st395: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof395 - } - st_case_395: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st396 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st396: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof396 - } - st_case_396: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st397 - } - case ( m.data)[( 
m.p)] >= 9: - goto tr599 - } - goto st31 - st397: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof397 - } - st_case_397: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr599 - } - goto st31 -tr593: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr637: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr818: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr823: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr827: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st398: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof398 - } - st_case_398: -//line plugins/parsers/influx/machine.go:12089 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr624 - case 13: - goto st72 - case 32: - goto tr623 - case 34: - goto tr122 - case 44: - goto tr90 - case 45: - goto tr625 - case 61: - goto st29 - case 92: - goto tr123 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr626 - } - case ( m.data)[( m.p)] >= 9: - goto tr623 - } - goto tr119 -tr624: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st399: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof399 - } - st_case_399: -//line plugins/parsers/influx/machine.go:12141 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr624 - case 13: - goto st72 - case 32: - goto tr623 - case 34: - goto tr122 - case 44: - goto tr90 - case 45: - goto tr625 - case 61: - goto tr127 - case 92: - goto tr123 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto 
tr626 - } - case ( m.data)[( m.p)] >= 9: - goto tr623 - } - goto tr119 -tr90: - ( m.cs) = 76 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr84: - ( m.cs) = 76 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr231: - ( m.cs) = 76 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st76: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof76 - } - st_case_76: -//line plugins/parsers/influx/machine.go:12219 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr190 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr222 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr221 -tr221: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st77 - st77: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof77 - } - st_case_77: -//line plugins/parsers/influx/machine.go:12251 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr224 - case 92: - goto st87 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st77 -tr224: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - - goto st78 - st78: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof78 - } - st_case_78: -//line plugins/parsers/influx/machine.go:12283 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr149 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr227 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr226 -tr226: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st79 - st79: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof79 - } - st_case_79: -//line plugins/parsers/influx/machine.go:12315 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr230: - ( m.cs) = 80 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st80: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof80 - } - st_case_80: -//line plugins/parsers/influx/machine.go:12356 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr234 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr202 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto tr235 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto tr233 -tr233: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st81 - st81: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof81 - } - st_case_81: -//line plugins/parsers/influx/machine.go:12390 - switch ( m.data)[( m.p)] { 
- case 10: - goto tr28 - case 11: - goto tr237 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st81 -tr237: - ( m.cs) = 82 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr234: - ( m.cs) = 82 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st82: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof82 - } - st_case_82: -//line plugins/parsers/influx/machine.go:12448 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr234 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr202 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto tr235 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto tr233 -tr235: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st83 - st83: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof83 - } - st_case_83: -//line plugins/parsers/influx/machine.go:12482 - switch ( m.data)[( m.p)] { - case 34: - goto st81 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st17 - st84: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof84 - } - st_case_84: -//line plugins/parsers/influx/machine.go:12506 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr237 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st81 -tr227: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st85 - st85: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof85 - } - st_case_85: -//line plugins/parsers/influx/machine.go:12540 - switch ( m.data)[( m.p)] { - case 34: - goto st79 - case 92: - goto st86 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st15 - st86: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof86 - } - st_case_86: -//line plugins/parsers/influx/machine.go:12564 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr222: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st87 - st87: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof87 - } - st_case_87: -//line plugins/parsers/influx/machine.go:12598 - switch ( m.data)[( m.p)] { - case 34: - goto st77 - case 92: - goto st88 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 
9: - goto tr45 - } - goto st13 - st88: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof88 - } - st_case_88: -//line plugins/parsers/influx/machine.go:12622 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr224 - case 92: - goto st87 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st77 -tr625: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st89 - st89: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof89 - } - st_case_89: -//line plugins/parsers/influx/machine.go:12654 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr125 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st400 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st40 -tr626: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st400 - st400: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof400 - } - st_case_400: -//line plugins/parsers/influx/machine.go:12693 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st544 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 -tr632: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr769: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr627: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr766: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st401: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof401 - } - st_case_401: -//line plugins/parsers/influx/machine.go:12798 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr631 - case 13: - goto st72 - case 32: - goto st401 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st401 - } - goto tr92 -tr631: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st402 - st402: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof402 - } - st_case_402: -//line plugins/parsers/influx/machine.go:12832 
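// (editorial note, not in the original diff) Reading key for this generated
// code: "st" labels are machine states, "tr" labels are transition actions
// that run handler callbacks before re-entering the loop via _again, and the
// "//line …machine.go.rl:NN" directives map each action body back to its
// source in the Ragel grammar. The error idiom — ( m.p)-- followed by
// {( m.p)++; goto _out } with ( m.cs) = 257 — appears to be Ragel's
// fhold/fbreak pair: it records error state 257 in m.cs and stops the machine
// without consuming the current byte.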
- switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr631 - case 13: - goto st72 - case 32: - goto st401 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st401 - } - goto tr92 -tr633: - ( m.cs) = 403 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr628: - ( m.cs) = 403 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st403: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof403 - } - st_case_403: -//line plugins/parsers/influx/machine.go:12900 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr633 - case 13: - goto st72 - case 32: - goto tr632 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr632 - } - goto tr119 -tr127: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st90 -tr381: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st90 - st90: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof90 - } - st_case_90: -//line plugins/parsers/influx/machine.go:12944 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr210 - case 44: - goto tr90 - case 45: - goto tr243 - case 46: - goto tr244 - case 48: - goto tr245 - case 70: - goto tr247 - case 84: - goto tr248 - case 92: - goto st140 - case 102: - goto tr249 - case 116: - goto tr250 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr246 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 -tr88: - ( m.cs) = 91 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr82: - ( m.cs) = 91 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st91: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof91 - } - st_case_91: -//line plugins/parsers/influx/machine.go:13019 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr129 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto st29 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto tr119 -tr123: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st92 - st92: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof92 - } - st_case_92: -//line plugins/parsers/influx/machine.go:13053 - switch ( m.data)[( m.p)] { - case 34: - goto st40 - case 92: - goto st40 - } - switch { - case ( 
m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st10 -tr243: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st93 - st93: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof93 - } - st_case_93: -//line plugins/parsers/influx/machine.go:13080 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 46: - goto st95 - case 48: - goto st532 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st535 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 -tr83: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr89: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr116: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st404: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof404 - } - st_case_404: -//line plugins/parsers/influx/machine.go:13162 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr634 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr501 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr499 - } - goto st1 -tr634: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr812: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1006: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1010: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1014: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - 
err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st405: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof405 - } - st_case_405: -//line plugins/parsers/influx/machine.go:13291 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr504 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr4 - case 45: - goto tr505 - case 61: - goto st1 - case 92: - goto tr43 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr506 - } - case ( m.data)[( m.p)] >= 9: - goto tr499 - } - goto tr39 -tr35: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st94 -tr458: -//line plugins/parsers/influx/machine.go.rl:82 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st94 - st94: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof94 - } - st_case_94: -//line plugins/parsers/influx/machine.go:13340 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 - } - goto st1 -tr244: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st95 - st95: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof95 - } - st_case_95: -//line plugins/parsers/influx/machine.go:13361 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 - st406: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof406 - } - st_case_406: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr594: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr639: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr760: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr794: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line 
plugins/parsers/influx/machine.go.rl:112
-
-	err = m.handler.AddInt(m.key, m.text())
-	if err != nil {
-		( m.p)--
-
-		( m.cs) = 257;
-		{( m.p)++; goto _out }
-	}
-
-	goto _again
-tr800:
-	( m.cs) = 96
-//line plugins/parsers/influx/machine.go.rl:99
-
-	err = m.handler.AddTag(m.key, m.text())
-	if err != nil {
-		( m.p)--
-
-		( m.cs) = 257;
-		{( m.p)++; goto _out }
-	}
-
-//line plugins/parsers/influx/machine.go.rl:121
-
-	err = m.handler.AddUint(m.key, m.text())
-	if err != nil {
-		( m.p)--
-
-		( m.cs) = 257;
-		{( m.p)++; goto _out }
-	}
-
-	goto _again

[... actions tr806, tr819, tr824, tr828 omitted: the same two-action template, chaining AddBool/AddInt/AddUint behind AddTag or SetMeasurement ...]

- st96:
-	if ( m.p)++; ( m.p) == ( m.pe) {
-		goto _test_eof96
-	}
- st_case_96:
-//line plugins/parsers/influx/machine.go:13627
-	switch ( m.data)[( m.p)] {
-	case 9:
-		goto st6
-	case 10:
-		goto tr28
-	case 32:
-		goto st6
-	case 34:
-		goto tr256
-	case 44:
-		goto st6
-	case 61:
-		goto st6
-	case 92:
-		goto tr257
-	}
-	if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
-		goto st6
-	}
-	goto tr255

[... several thousand further deleted lines of the Ragel-generated machine (machine.go:13659 through machine.go:17590) omitted: states st97-st489 and actions tr255-tr893 repeat the two templates above, pairing per-byte dispatch switches with the eight handler callbacks (SetMeasurement, AddTag, AddInt, AddUint, AddFloat, AddString, AddBool, SetTimestamp) and the shared error-recovery sequence. Both patterns are sketched informally below; the hunk then resumes at st117, the tail of the "false" keyword scanner ...]
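For orientation while reading the deleted actions above: every tr* block funnels m.text() into one of eight handler callbacks. A minimal sketch of that callback surface, reconstructed only from the calls visible in this hunk (the real interface in plugins/parsers/influx may use different names or signatures):

package influx

// Handler is a sketch of the callback surface the generated machine drives.
// Reconstructed from the calls visible in the deleted hunk; assumed, not
// copied from the real source.
type Handler interface {
	SetMeasurement(name []byte) error
	AddTag(key []byte, value []byte) error
	AddInt(key []byte, value []byte) error
	AddUint(key []byte, value []byte) error
	AddFloat(key []byte, value []byte) error
	AddString(key []byte, value []byte) error
	AddBool(key []byte, value []byte) error
	SetTimestamp(tm []byte) error
}

Each callback apparently receives the raw bytes between m.pb (set by the recurring "m.pb = m.p" actions) and m.p, so the handler, not the machine, decides how to parse and store the value.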
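The error-recovery sequence stamped into every action is identical: step back one byte, park the machine in state 257 (its error state), and leave the exec loop via goto _out. A hand-written analogue under assumed names (errState, abort; neither appears in the generated file):

package influx

// errState mirrors the ( m.cs) = 257 assignments in the hunk; the generated
// constant's real name is not visible in this diff, so the name is assumed.
const errState = 257

type machine struct {
	p  int // index of the byte being examined
	cs int // current Ragel state
}

// abort reproduces the template the generator inlines at every call site:
//	( m.p)--
//	( m.cs) = 257;
//	{( m.p)++; goto _out }
// The decrement paired with the ( m.p)++ on the exit path appears to leave
// m.p on the byte under examination when the loop stops, rather than one
// past it, so the caller can report the failing position accurately.
func (m *machine) abort() {
	m.p--           // step back onto the offending byte
	m.cs = errState // park the machine in its error state
	m.p++           // stands in for the ( m.p)++ performed on the goto _out path
}

In the generated file the three statements cannot be factored into a helper like this, because goto _out must stay inside the function that owns the _out label, which is why the same lines repeat hundreds of times above.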
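The numeric case labels that recur in every st_case_* block are the line-protocol delimiters. Purely as a legend (no such function exists in the parser; the machine encodes this per state):

package influx

// byteClass names the byte values the generated states dispatch on.
// Illustrative only.
func byteClass(b byte) string {
	switch b {
	case '\t', ' ': // 9, 32: separate measurement, field set, and timestamp
		return "whitespace"
	case '\n': // 10: ends a point
		return "newline"
	case '\r': // 13: CR of a CRLF terminator
		return "carriage return"
	case '"': // 34: delimits string field values
		return "quote"
	case ',': // 44: separates tags and fields
		return "comma"
	case '=': // 61: separates keys from values
		return "equals"
	case '\\': // 92: escapes the next byte
		return "escape"
	}
	if '0' <= b && b <= '9' { // 48-57: runs of digits form numbers and timestamps
		return "digit"
	}
	return "other"
}

The digit arm also explains the long chains of near-identical states in the elided span (st409-st428, st435-st452, st459-st476): each state consumes one more digit of a timestamp, which appears to be how the machine bounds the timestamp length (an int64 holds at most 19 decimal digits) without keeping a counter.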
plugins/parsers/influx/machine.go:17590 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 46: - goto st477 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - case 105: - goto st483 - case 117: - goto st486 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st487 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 -tr285: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st488 - st488: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof488 - } - st_case_488: -//line plugins/parsers/influx/machine.go:17639 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 65: - goto st113 - case 92: - goto st62 - case 97: - goto st116 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 - st113: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof113 - } - st_case_113: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 76: - goto st114 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st114: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof114 - } - st_case_114: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 83: - goto st115 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st115: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof115 - } - st_case_115: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 69: - goto st489 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st489: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof489 - } - st_case_489: - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 - st116: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof116 - } - st_case_116: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 108: - goto st117 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st117: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof117 - } - st_case_117: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 115: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 
12 { - goto tr153 - } - goto st47 - st118: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof118 - } - st_case_118: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 101: - goto st489 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr286: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st490 - st490: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof490 - } - st_case_490: -//line plugins/parsers/influx/machine.go:17878 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 82: - goto st119 - case 92: - goto st62 - case 114: - goto st120 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 - st119: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof119 - } - st_case_119: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 85: - goto st115 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st120: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof120 - } - st_case_120: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 117: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr287: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st491 - st491: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof491 - } - st_case_491: -//line plugins/parsers/influx/machine.go:17974 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 92: - goto st62 - case 97: - goto st116 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 -tr288: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st492 - st492: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof492 - } - st_case_492: -//line plugins/parsers/influx/machine.go:18010 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 92: - goto st62 - case 114: - goto st120 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 -tr277: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st121 - st121: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof121 - } - st_case_121: -//line plugins/parsers/influx/machine.go:18046 - switch ( m.data)[( m.p)] { - case 34: - goto st107 - case 92: - goto st122 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 - st122: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof122 - } - 
st_case_122: -//line plugins/parsers/influx/machine.go:18070 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr259 - case 44: - goto st6 - case 61: - goto tr279 - case 92: - goto st121 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st107 -tr265: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st123 - st123: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof123 - } - st_case_123: -//line plugins/parsers/influx/machine.go:18102 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 46: - goto st124 - case 48: - goto st517 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st79 -tr266: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st124 - st124: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof124 - } - st_case_124: -//line plugins/parsers/influx/machine.go:18145 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st79 - st493: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof493 - } - st_case_493: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 -tr759: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr792: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr798: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr804: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = 
m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st494: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof494 - } - st_case_494: -//line plugins/parsers/influx/machine.go:18306 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr763 - case 13: - goto st72 - case 32: - goto tr762 - case 34: - goto tr202 - case 44: - goto tr231 - case 45: - goto tr764 - case 61: - goto st6 - case 92: - goto tr235 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr765 - } - case ( m.data)[( m.p)] >= 9: - goto tr762 - } - goto tr233 -tr763: - ( m.cs) = 495 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st495: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof495 - } - st_case_495: -//line plugins/parsers/influx/machine.go:18358 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr763 - case 13: - goto st72 - case 32: - goto tr762 - case 34: - goto tr202 - case 44: - goto tr231 - case 45: - goto tr764 - case 61: - goto tr99 - case 92: - goto tr235 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr765 - } - case ( m.data)[( m.p)] >= 9: - goto tr762 - } - goto tr233 -tr764: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st125 - st125: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof125 - } - st_case_125: -//line plugins/parsers/influx/machine.go:18399 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr237 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st496 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st81 -tr765: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st496 - st496: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof496 - } - st_case_496: -//line plugins/parsers/influx/machine.go:18438 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st498 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 -tr770: - ( m.cs) = 497 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr767: - ( m.cs) = 497 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st497: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof497 - } - st_case_497: -//line plugins/parsers/influx/machine.go:18511 - switch ( m.data)[( 
m.p)] { - case 10: - goto tr219 - case 11: - goto tr770 - case 13: - goto st72 - case 32: - goto tr769 - case 34: - goto tr202 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto tr235 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr769 - } - goto tr233 - st498: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof498 - } - st_case_498: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st499 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st499: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof499 - } - st_case_499: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st500 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st500: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof500 - } - st_case_500: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st501 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st501: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof501 - } - st_case_501: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st502 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st502: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof502 - } - st_case_502: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st503 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st503: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof503 - } - st_case_503: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st504 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st504: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof504 - } - st_case_504: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st505 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st505: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof505 - } - st_case_505: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st506 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st506: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof506 - } - st_case_506: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st507 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st507: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof507 - } - st_case_507: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st508: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof508 - } - st_case_508: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st509 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st509: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof509 - } - st_case_509: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st510 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st510: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof510 - } - st_case_510: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st511 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st511: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof511 - } - st_case_511: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st512 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st512: - if ( m.p)++; ( m.p) == ( m.pe) { - goto 
_test_eof512 - } - st_case_512: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st513 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st513: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof513 - } - st_case_513: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st514 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st514: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof514 - } - st_case_514: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st515 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st515: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof515 - } - st_case_515: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr766 - } - goto st81 - st126: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof126 - } - st_case_126: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr293 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - default: - goto st127 - } - goto st79 - st127: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof127 - } - st_case_127: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st79 - st516: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof516 - } - st_case_516: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st517: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof517 - } - st_case_517: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 
44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st518: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof518 - } - st_case_518: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st519: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof519 - } - st_case_519: - switch ( m.data)[( m.p)] { - case 10: - goto tr791 - case 11: - goto tr792 - case 13: - goto tr793 - case 32: - goto tr790 - case 34: - goto tr155 - case 44: - goto tr794 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr790 - } - goto st79 - st520: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof520 - } - st_case_520: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 -tr267: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st521 - st521: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof521 - } - st_case_521: -//line plugins/parsers/influx/machine.go:19361 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - case 117: - goto st522 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st522: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof522 - } - st_case_522: - switch ( m.data)[( m.p)] { - case 10: - goto tr797 - case 11: - goto tr798 - case 13: - goto tr799 - case 32: - goto tr796 - case 34: - goto tr155 - case 44: - goto tr800 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr796 - } - goto st79 -tr268: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st523 - st523: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof523 - } - st_case_523: -//line plugins/parsers/influx/machine.go:19437 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - case 117: - goto st522 
- } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 -tr269: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st524 - st524: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof524 - } - st_case_524: -//line plugins/parsers/influx/machine.go:19486 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 65: - goto st128 - case 92: - goto st85 - case 97: - goto st131 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 - st128: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof128 - } - st_case_128: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 76: - goto st129 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st129: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof129 - } - st_case_129: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 83: - goto st130 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st130: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof130 - } - st_case_130: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 69: - goto st525 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st525: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof525 - } - st_case_525: - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 - st131: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof131 - } - st_case_131: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 108: - goto st132 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st132: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof132 - } - st_case_132: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 115: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st133: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof133 - } - st_case_133: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 101: - goto st525 - } - if 9 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr270: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st526 - st526: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof526 - } - st_case_526: -//line plugins/parsers/influx/machine.go:19725 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 82: - goto st134 - case 92: - goto st85 - case 114: - goto st135 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 - st134: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof134 - } - st_case_134: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 85: - goto st130 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st135: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof135 - } - st_case_135: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 117: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr271: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st527 - st527: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof527 - } - st_case_527: -//line plugins/parsers/influx/machine.go:19821 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 92: - goto st85 - case 97: - goto st131 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 -tr272: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st528 - st528: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof528 - } - st_case_528: -//line plugins/parsers/influx/machine.go:19857 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 92: - goto st85 - case 114: - goto st135 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 -tr257: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st136 - st136: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof136 - } - st_case_136: -//line plugins/parsers/influx/machine.go:19893 - switch ( m.data)[( m.p)] { - case 34: - goto st97 - case 92: - goto st137 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 - st137: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof137 - } - st_case_137: -//line plugins/parsers/influx/machine.go:19917 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr259 - case 44: - goto st6 - case 61: - goto tr260 - case 92: - goto st136 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st97 - st138: - if ( m.p)++; ( m.p) == ( 
m.pe) { - goto _test_eof138 - } - st_case_138: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr315 - case 44: - goto tr90 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - default: - goto st139 - } - goto st29 -tr315: - ( m.cs) = 529 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st529: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof529 - } - st_case_529: -//line plugins/parsers/influx/machine.go:19990 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr634 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr501 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 9: - goto tr499 - } - goto st1 - st530: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof530 - } - st_case_530: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st139: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof139 - } - st_case_139: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 - st531: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof531 - } - st_case_531: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr85: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st140 - st140: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof140 - } - st_case_140: -//line plugins/parsers/influx/machine.go:20113 - switch ( m.data)[( m.p)] { - case 34: - goto st29 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st1 - st532: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof532 - } - st_case_532: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st533 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 
- } - goto st29 - st533: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof533 - } - st_case_533: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st533 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 - st534: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof534 - } - st_case_534: - switch ( m.data)[( m.p)] { - case 10: - goto tr817 - case 11: - goto tr818 - case 13: - goto tr793 - case 32: - goto tr816 - case 34: - goto tr89 - case 44: - goto tr819 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr816 - } - goto st29 - st535: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof535 - } - st_case_535: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st535 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr245: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st536 - st536: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof536 - } - st_case_536: -//line plugins/parsers/influx/machine.go:20277 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - case 117: - goto st537 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st533 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 - st537: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof537 - } - st_case_537: - switch ( m.data)[( m.p)] { - case 10: - goto tr822 - case 11: - goto tr823 - case 13: - goto tr799 - case 32: - goto tr821 - case 34: - goto tr89 - case 44: - goto tr824 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr821 - } - goto st29 -tr246: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st538 - st538: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof538 - } - st_case_538: -//line plugins/parsers/influx/machine.go:20349 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - case 117: - goto st537 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st538 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr247: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st539 - st539: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof539 - } - st_case_539: -//line plugins/parsers/influx/machine.go:20396 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - 
case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 65: - goto st141 - case 92: - goto st140 - case 97: - goto st144 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st141: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof141 - } - st_case_141: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 76: - goto st142 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st142: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof142 - } - st_case_142: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 83: - goto st143 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st143: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof143 - } - st_case_143: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 69: - goto st540 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st540: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof540 - } - st_case_540: - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st144: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof144 - } - st_case_144: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 108: - goto st145 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st145: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof145 - } - st_case_145: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 115: - goto st146 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st146: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof146 - } - st_case_146: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 101: - goto st540 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 -tr248: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st541 - st541: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof541 - } - st_case_541: -//line plugins/parsers/influx/machine.go:20619 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 82: - goto st147 - case 92: - goto st140 - case 114: - goto st148 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st147: - if ( m.p)++; ( m.p) == ( 
m.pe) { - goto _test_eof147 - } - st_case_147: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 85: - goto st143 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st148: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof148 - } - st_case_148: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 117: - goto st146 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 -tr249: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st542 - st542: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof542 - } - st_case_542: -//line plugins/parsers/influx/machine.go:20709 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 92: - goto st140 - case 97: - goto st144 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 -tr250: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st543 - st543: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof543 - } - st_case_543: -//line plugins/parsers/influx/machine.go:20743 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 92: - goto st140 - case 114: - goto st148 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st544: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof544 - } - st_case_544: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st545 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st545: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof545 - } - st_case_545: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st546 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st546: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof546 - } - st_case_546: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st547 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st547: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof547 - } - st_case_547: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - 
switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st548 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st548: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof548 - } - st_case_548: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st549 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st549: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof549 - } - st_case_549: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st550 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st550: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof550 - } - st_case_550: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st551 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st551: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof551 - } - st_case_551: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st552 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st552: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof552 - } - st_case_552: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st553 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st553: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof553 - } - st_case_553: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st554 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st554: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof554 - } - st_case_554: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st555 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto 
st40 - st555: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof555 - } - st_case_555: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st556 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st556: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof556 - } - st_case_556: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st557 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st557: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof557 - } - st_case_557: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st558 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st558: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof558 - } - st_case_558: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st559 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st559: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof559 - } - st_case_559: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st560 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st560: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof560 - } - st_case_560: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st561 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st561: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof561 - } - st_case_561: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr627 - } - goto st40 -tr211: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st149 - st149: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof149 - } - st_case_149: -//line plugins/parsers/influx/machine.go:21348 - switch ( m.data)[( m.p)] { - case 10: - 
goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 46: - goto st150 - case 48: - goto st586 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st589 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 -tr212: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st150 - st150: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof150 - } - st_case_150: -//line plugins/parsers/influx/machine.go:21389 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st562 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 - st562: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof562 - } - st_case_562: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st562 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr851: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr883: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr887: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr892: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st563: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof563 - } - st_case_563: -//line plugins/parsers/influx/machine.go:21546 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr855 - case 13: - goto st102 - case 32: - goto tr854 - case 34: - goto tr122 - case 44: - goto tr180 - case 45: - goto tr856 - case 61: - goto st53 - case 92: - goto tr184 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr857 - } - case ( m.data)[( m.p)] >= 9: - goto tr854 - 
} - goto tr182 -tr855: - ( m.cs) = 564 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st564: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof564 - } - st_case_564: -//line plugins/parsers/influx/machine.go:21598 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr855 - case 13: - goto st102 - case 32: - goto tr854 - case 34: - goto tr122 - case 44: - goto tr180 - case 45: - goto tr856 - case 61: - goto tr187 - case 92: - goto tr184 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr857 - } - case ( m.data)[( m.p)] >= 9: - goto tr854 - } - goto tr182 -tr856: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st151 - st151: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof151 - } - st_case_151: -//line plugins/parsers/influx/machine.go:21639 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr186 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st565 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st55 -tr857: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st565 - st565: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof565 - } - st_case_565: -//line plugins/parsers/influx/machine.go:21678 - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st567 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 -tr862: - ( m.cs) = 566 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr859: - ( m.cs) = 566 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st566: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof566 - } - st_case_566: -//line plugins/parsers/influx/machine.go:21751 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr862 - case 13: - goto st102 - case 32: - goto tr861 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr861 - } - goto tr182 -tr184: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st152 - st152: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof152 - } - st_case_152: -//line plugins/parsers/influx/machine.go:21785 - switch ( m.data)[( m.p)] { - case 34: - goto st55 - case 92: - goto st55 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st10 - st567: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof567 - } - st_case_567: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st568 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st568: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof568 - } - st_case_568: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st569 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st569: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof569 - } - st_case_569: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st570 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st570: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof570 - } - st_case_570: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st571 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st571: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof571 - } - st_case_571: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st572 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st572: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof572 - } - st_case_572: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st573 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st573: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof573 - } - st_case_573: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st574 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st574: - if ( m.p)++; ( m.p) == ( m.pe) { - goto 
_test_eof574 - } - st_case_574: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st575: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof575 - } - st_case_575: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st576 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st576: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof576 - } - st_case_576: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st577: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof577 - } - st_case_577: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st578 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st578: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof578 - } - st_case_578: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st579: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof579 - } - st_case_579: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st580 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st580: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof580 - } - st_case_580: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st581 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st581: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof581 - } - st_case_581: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - 
case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st582 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st582: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof582 - } - st_case_582: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st583 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st583: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof583 - } - st_case_583: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st584 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st584: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof584 - } - st_case_584: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr858 - } - goto st55 - st153: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof153 - } - st_case_153: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr315 - case 44: - goto tr180 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 - } - default: - goto st154 - } - goto st53 - st154: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof154 - } - st_case_154: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 - st585: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof585 - } - st_case_585: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr338: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st155 - st155: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof155 - } - st_case_155: -//line plugins/parsers/influx/machine.go:22477 - switch ( m.data)[( m.p)] { - case 34: - goto st53 - case 92: - goto st53 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - 
} - goto st1 - st586: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof586 - } - st_case_586: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st587 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 - st587: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof587 - } - st_case_587: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st587 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 - st588: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof588 - } - st_case_588: - switch ( m.data)[( m.p)] { - case 10: - goto tr737 - case 11: - goto tr883 - case 13: - goto tr739 - case 32: - goto tr882 - case 34: - goto tr89 - case 44: - goto tr884 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr882 - } - goto st53 - st589: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof589 - } - st_case_589: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st589 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr213: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st590 - st590: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof590 - } - st_case_590: -//line plugins/parsers/influx/machine.go:22641 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - case 117: - goto st591 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st587 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 - st591: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof591 - } - st_case_591: - switch ( m.data)[( m.p)] { - case 10: - goto tr743 - case 11: - goto tr887 - case 13: - goto tr745 - case 32: - goto tr886 - case 34: - goto tr89 - case 44: - goto tr888 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr886 - } - goto st53 -tr214: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st592 - st592: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof592 - } - st_case_592: -//line plugins/parsers/influx/machine.go:22713 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 
- case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - case 117: - goto st591 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st592 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr215: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st593 - st593: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof593 - } - st_case_593: -//line plugins/parsers/influx/machine.go:22760 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 65: - goto st156 - case 92: - goto st155 - case 97: - goto st159 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st156: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof156 - } - st_case_156: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 76: - goto st157 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st157: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof157 - } - st_case_157: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 83: - goto st158 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st158: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof158 - } - st_case_158: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 69: - goto st594 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st594: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof594 - } - st_case_594: - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st159: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof159 - } - st_case_159: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 108: - goto st160 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st160: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof160 - } - st_case_160: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 115: - goto st161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st161: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof161 - } - st_case_161: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 101: - goto st594 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 -tr216: 
-//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st595 - st595: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof595 - } - st_case_595: -//line plugins/parsers/influx/machine.go:22983 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 82: - goto st162 - case 92: - goto st155 - case 114: - goto st163 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st162: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof162 - } - st_case_162: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 85: - goto st158 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st163: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof163 - } - st_case_163: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 117: - goto st161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 -tr217: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st596 - st596: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof596 - } - st_case_596: -//line plugins/parsers/influx/machine.go:23073 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 92: - goto st155 - case 97: - goto st159 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 -tr218: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st597 - st597: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof597 - } - st_case_597: -//line plugins/parsers/influx/machine.go:23107 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 92: - goto st155 - case 114: - goto st163 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st164: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof164 - } - st_case_164: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr337 - case 13: - goto st6 - case 32: - goto st164 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr338 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st164 - } - goto tr335 -tr337: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st165 - st165: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof165 - } - st_case_165: -//line plugins/parsers/influx/machine.go:23168 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr340 - case 13: - goto st6 - case 32: - goto tr339 - case 34: - goto tr83 - case 35: - goto st53 - case 44: - goto tr180 - case 92: - goto tr338 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr339 - } - goto tr335 -tr339: - ( m.cs) = 166 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st166: - if ( m.p)++; ( m.p) == ( m.pe) { - goto 
_test_eof166 - } - st_case_166: -//line plugins/parsers/influx/machine.go:23209 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr342 - case 13: - goto st6 - case 32: - goto st166 - case 34: - goto tr122 - case 35: - goto tr158 - case 44: - goto st6 - case 61: - goto tr335 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st166 - } - goto tr182 -tr342: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st167 -tr343: - ( m.cs) = 167 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st167: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof167 - } - st_case_167: -//line plugins/parsers/influx/machine.go:23262 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr343 - case 13: - goto st6 - case 32: - goto tr339 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr344 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr339 - } - goto tr182 -tr340: - ( m.cs) = 168 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st168: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof168 - } - st_case_168: -//line plugins/parsers/influx/machine.go:23307 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr343 - case 13: - goto st6 - case 32: - goto tr339 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr335 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr339 - } - goto tr182 -tr538: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st169 - st169: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof169 - } - st_case_169: -//line plugins/parsers/influx/machine.go:23341 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st598 - } - goto st6 -tr539: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st598 - st598: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof598 - } - st_case_598: -//line plugins/parsers/influx/machine.go:23365 - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st599 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st599: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof599 - } - st_case_599: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st600 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st600: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof600 - } - st_case_600: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( 
m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st601 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st601: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof601 - } - st_case_601: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st602 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st602: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof602 - } - st_case_602: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st603 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st603: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof603 - } - st_case_603: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st604 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st604: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof604 - } - st_case_604: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st605 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st605: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof605 - } - st_case_605: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st606 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st606: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof606 - } - st_case_606: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st607 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st607: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof607 - } - st_case_607: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st608 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st608: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof608 - } - st_case_608: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st609 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st609: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof609 - } - st_case_609: - switch ( m.data)[( m.p)] { - case 10: - 
goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st610 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st610: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof610 - } - st_case_610: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st611 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st611: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof611 - } - st_case_611: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st612 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st612: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof612 - } - st_case_612: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st613 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st613: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof613 - } - st_case_613: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st614 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st614: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof614 - } - st_case_614: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st615 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st615: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof615 - } - st_case_615: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st616 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st616: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof616 - } - st_case_616: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr673 - } - goto st6 -tr917: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st170 -tr534: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr924: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - 
} - - goto _again -tr926: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr929: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st170: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof170 - } - st_case_170: -//line plugins/parsers/influx/machine.go:23913 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr347 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr346 -tr346: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st171 - st171: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof171 - } - st_case_171: -//line plugins/parsers/influx/machine.go:23945 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr349 - case 92: - goto st183 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st171 -tr349: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st172 - st172: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof172 - } - st_case_172: -//line plugins/parsers/influx/machine.go:23977 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr351 - case 45: - goto tr165 - case 46: - goto tr166 - case 48: - goto tr167 - case 70: - goto tr352 - case 84: - goto tr353 - case 92: - goto st73 - case 102: - goto tr354 - case 116: - goto tr355 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr168 - } - goto st6 -tr351: - ( m.cs) = 617 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st617: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof617 - } - st_case_617: -//line plugins/parsers/influx/machine.go:24022 - switch ( m.data)[( m.p)] { - case 10: - goto tr665 - case 13: - goto tr667 - case 32: - goto tr916 - case 34: - goto tr25 - case 44: - goto tr917 - case 92: - goto tr26 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 - } - goto tr23 -tr167: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st618 - st618: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof618 - } - st_case_618: -//line plugins/parsers/influx/machine.go:24052 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - case 117: - goto st624 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st619 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st619: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof619 - } - st_case_619: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - } - 
switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st619 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st173: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof173 - } - st_case_173: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr356 - case 43: - goto st174 - case 45: - goto st174 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto st6 -tr356: - ( m.cs) = 620 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st620: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof620 - } - st_case_620: -//line plugins/parsers/influx/machine.go:24159 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 - } - case ( m.data)[( m.p)] >= 9: - goto st271 - } - goto tr103 - st621: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof621 - } - st_case_621: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 - st174: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof174 - } - st_case_174: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto st6 - st622: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof622 - } - st_case_622: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st623: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof623 - } - st_case_623: - switch ( m.data)[( m.p)] { - case 10: - goto tr737 - case 13: - goto tr739 - case 32: - goto tr923 - case 34: - goto tr29 - case 44: - goto tr924 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr923 - } - goto st6 - st624: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof624 - } - st_case_624: - switch ( m.data)[( m.p)] { - case 10: - goto tr743 - case 13: - goto tr745 - case 32: - goto tr925 - case 34: - goto tr29 - case 44: - goto tr926 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr925 - } - goto st6 -tr168: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st625 - st625: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof625 - } - st_case_625: -//line plugins/parsers/influx/machine.go:24305 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - case 117: - goto st624 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st625 - 
} - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 -tr352: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st626 - st626: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof626 - } - st_case_626: -//line plugins/parsers/influx/machine.go:24350 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 65: - goto st175 - case 92: - goto st73 - case 97: - goto st178 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st175: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof175 - } - st_case_175: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 76: - goto st176 - case 92: - goto st73 - } - goto st6 - st176: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof176 - } - st_case_176: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 83: - goto st177 - case 92: - goto st73 - } - goto st6 - st177: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof177 - } - st_case_177: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 69: - goto st627 - case 92: - goto st73 - } - goto st6 - st627: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof627 - } - st_case_627: - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st178: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof178 - } - st_case_178: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 108: - goto st179 - } - goto st6 - st179: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof179 - } - st_case_179: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 115: - goto st180 - } - goto st6 - st180: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof180 - } - st_case_180: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 101: - goto st627 - } - goto st6 -tr353: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st628 - st628: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof628 - } - st_case_628: -//line plugins/parsers/influx/machine.go:24503 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 82: - goto st181 - case 92: - goto st73 - case 114: - goto st182 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st181: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof181 - } - st_case_181: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 85: - goto st177 - case 92: - goto st73 - } - goto st6 - st182: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof182 - } - st_case_182: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 117: - goto st180 - } - goto st6 -tr354: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st629 - st629: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof629 - } - st_case_629: -//line plugins/parsers/influx/machine.go:24569 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto 
tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 97: - goto st178 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr355: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st630 - st630: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof630 - } - st_case_630: -//line plugins/parsers/influx/machine.go:24601 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 114: - goto st182 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr347: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st183 - st183: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof183 - } - st_case_183: -//line plugins/parsers/influx/machine.go:24633 - switch ( m.data)[( m.p)] { - case 34: - goto st171 - case 92: - goto st171 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st631: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof631 - } - st_case_631: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st619 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st632: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof632 - } - st_case_632: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st632 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 -tr169: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st633 - st633: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof633 - } - st_case_633: -//line plugins/parsers/influx/machine.go:24732 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 65: - goto st184 - case 92: - goto st73 - case 97: - goto st187 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st184: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof184 - } - st_case_184: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 76: - goto st185 - case 92: - goto st73 - } - goto st6 - st185: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof185 - } - st_case_185: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 83: - goto st186 - case 92: - goto st73 - } - goto st6 - st186: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof186 - } - st_case_186: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 69: - goto st634 - case 92: - goto st73 - } - goto st6 - st634: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof634 - } - st_case_634: - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 
32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st187: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof187 - } - st_case_187: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 108: - goto st188 - } - goto st6 - st188: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof188 - } - st_case_188: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 115: - goto st189 - } - goto st6 - st189: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof189 - } - st_case_189: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 101: - goto st634 - } - goto st6 -tr170: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st635 - st635: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof635 - } - st_case_635: -//line plugins/parsers/influx/machine.go:24885 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 82: - goto st190 - case 92: - goto st73 - case 114: - goto st191 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st190: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof190 - } - st_case_190: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 85: - goto st186 - case 92: - goto st73 - } - goto st6 - st191: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof191 - } - st_case_191: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 117: - goto st189 - } - goto st6 -tr171: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st636 - st636: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof636 - } - st_case_636: -//line plugins/parsers/influx/machine.go:24951 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 97: - goto st187 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr172: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st637 - st637: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof637 - } - st_case_637: -//line plugins/parsers/influx/machine.go:24983 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 114: - goto st191 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr160: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st192 - st192: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof192 - } - st_case_192: -//line plugins/parsers/influx/machine.go:25015 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr160 - case 13: - goto st6 - case 32: - goto st48 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto tr161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st48 - } - goto tr158 -tr138: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st193 - st193: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof193 - } - st_case_193: -//line plugins/parsers/influx/machine.go:25049 - 
switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 46: - goto st194 - case 48: - goto st639 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st642 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st15 -tr139: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st194 - st194: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof194 - } - st_case_194: -//line plugins/parsers/influx/machine.go:25090 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st638 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st15 - st638: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof638 - } - st_case_638: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st638 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st195: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof195 - } - st_case_195: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 34: - goto st196 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - default: - goto st196 - } - goto st15 - st196: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof196 - } - st_case_196: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st15 - st639: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof639 - } - st_case_639: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st640: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof640 - } - st_case_640: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 
- } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st641: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof641 - } - st_case_641: - switch ( m.data)[( m.p)] { - case 10: - goto tr942 - case 11: - goto tr943 - case 13: - goto tr944 - case 32: - goto tr941 - case 44: - goto tr945 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr941 - } - goto st15 - st642: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof642 - } - st_case_642: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st642 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 -tr140: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st643 - st643: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof643 - } - st_case_643: -//line plugins/parsers/influx/machine.go:25364 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - case 117: - goto st644 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st644: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof644 - } - st_case_644: - switch ( m.data)[( m.p)] { - case 10: - goto tr948 - case 11: - goto tr949 - case 13: - goto tr950 - case 32: - goto tr947 - case 44: - goto tr951 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr947 - } - goto st15 -tr141: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st645 - st645: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof645 - } - st_case_645: -//line plugins/parsers/influx/machine.go:25436 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - case 117: - goto st644 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st645 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 -tr142: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st646 - st646: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof646 - } - st_case_646: -//line plugins/parsers/influx/machine.go:25483 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 65: - goto st197 - case 92: - goto st21 - case 97: - goto st200 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 - st197: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof197 - } - st_case_197: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - 
case 44: - goto tr60 - case 61: - goto tr45 - case 76: - goto st198 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st198: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof198 - } - st_case_198: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 83: - goto st199 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st199: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof199 - } - st_case_199: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 69: - goto st647 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st647: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof647 - } - st_case_647: - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 - st200: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof200 - } - st_case_200: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 108: - goto st201 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st201: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof201 - } - st_case_201: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 115: - goto st202 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st202: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof202 - } - st_case_202: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 101: - goto st647 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 -tr143: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st648 - st648: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof648 - } - st_case_648: -//line plugins/parsers/influx/machine.go:25706 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 82: - goto st203 - case 92: - goto st21 - case 114: - goto st204 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 - st203: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof203 - } - st_case_203: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 85: - goto st199 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st204: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof204 - } - st_case_204: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 
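
The character chains above (st197 through st202, entered from the st646..st650 cluster) spell out the boolean field literals one byte at a time. Collected into a table, the accepted spellings are exactly the ones below; note the asymmetry that bare 't'/'f' and 'True'/'TRUE' are valid while other mixed-case forms are rejected:

package main

import "fmt"

// boolLiterals lists every spelling the A/L/S/E, R/U, a/l/s/e, r/u
// chains above can reach an accepting state on.
var boolLiterals = map[string]bool{
	"t": true, "T": true, "true": true, "True": true, "TRUE": true,
	"f": false, "F": false, "false": false, "False": false, "FALSE": false,
}

func main() {
	for _, s := range []string{"TRUE", "tRUE"} {
		v, ok := boolLiterals[s]
		fmt.Println(s, v, ok) // TRUE true true; tRUE false false
	}
}
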
- case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 117: - goto st202 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 -tr144: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st649 - st649: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof649 - } - st_case_649: -//line plugins/parsers/influx/machine.go:25796 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 92: - goto st21 - case 97: - goto st200 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 -tr145: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st650 - st650: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof650 - } - st_case_650: -//line plugins/parsers/influx/machine.go:25830 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 92: - goto st21 - case 114: - goto st204 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 -tr121: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st205 -tr380: - ( m.cs) = 205 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st205: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof205 - } - st_case_205: -//line plugins/parsers/influx/machine.go:25881 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr380 - case 13: - goto st6 - case 32: - goto tr117 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr381 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr117 - } - goto tr119 -tr118: - ( m.cs) = 206 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st206: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof206 - } - st_case_206: -//line plugins/parsers/influx/machine.go:25926 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr380 - case 13: - goto st6 - case 32: - goto tr117 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr80 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr117 - } - goto tr119 -tr497: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st207 - st207: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof207 - } - st_case_207: -//line plugins/parsers/influx/machine.go:25960 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st651 - } - goto st6 -tr498: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st651 - st651: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof651 - } - st_case_651: -//line plugins/parsers/influx/machine.go:25984 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: 
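
The tr380/tr118 actions above show the callback-and-recover pattern used throughout the file: the machine calls into m.handler, and on error rewinds one byte (( m.p)--), parks itself in the recovery state (( m.cs) = 257 in this numbering), and exits through _out so the caller can surface the error and resume at the next newline. A minimal sketch of the handler surface those actions drive; only the five methods visible in this hunk are listed, and the []byte signatures are an assumption based on m.text():

package main

import "fmt"

// Handler sketches the callback interface behind m.handler. The real
// interface lives in plugins/parsers/influx and has more methods.
type Handler interface {
	SetMeasurement(name []byte) error
	AddFloat(key, value []byte) error
	AddInt(key, value []byte) error
	AddUint(key, value []byte) error
	AddBool(key, value []byte) error
}

// printHandler is a stand-in implementation for illustration only.
type printHandler struct{}

func (printHandler) SetMeasurement(n []byte) error { fmt.Printf("measurement %s\n", n); return nil }
func (printHandler) AddFloat(k, v []byte) error    { fmt.Printf("float %s=%s\n", k, v); return nil }
func (printHandler) AddInt(k, v []byte) error      { fmt.Printf("int %s=%s\n", k, v); return nil }
func (printHandler) AddUint(k, v []byte) error     { fmt.Printf("uint %s=%s\n", k, v); return nil }
func (printHandler) AddBool(k, v []byte) error     { fmt.Printf("bool %s=%s\n", k, v); return nil }

func main() {
	var h Handler = printHandler{}
	h.SetMeasurement([]byte("cpu"))
	h.AddFloat([]byte("usage"), []byte("0.64"))
}
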
- goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st652 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st652: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof652 - } - st_case_652: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st653 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st653: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof653 - } - st_case_653: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st654 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st654: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof654 - } - st_case_654: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st655 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st655: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof655 - } - st_case_655: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st656 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st656: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof656 - } - st_case_656: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st657 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st657: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof657 - } - st_case_657: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st658 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st658: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof658 - } - st_case_658: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st659 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st659: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof659 - } - st_case_659: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st660 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st660: - if ( m.p)++; ( m.p) == ( m.pe) { - goto 
_test_eof660 - } - st_case_660: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st661 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st661: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof661 - } - st_case_661: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st662 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st662: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof662 - } - st_case_662: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st663 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st663: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof663 - } - st_case_663: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st664 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st664: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof664 - } - st_case_664: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st665 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st665: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof665 - } - st_case_665: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st666 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st666: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof666 - } - st_case_666: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st667 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st667: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof667 - } - st_case_667: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st668 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st668: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof668 - } - st_case_668: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] 
<= 57 { - goto st669 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st669: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof669 - } - st_case_669: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr599 - } - goto st6 -tr494: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st208 -tr981: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr986: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr989: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr992: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st208: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof208 - } - st_case_208: -//line plugins/parsers/influx/machine.go:26532 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr384 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr385 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr383 -tr383: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st209 - st209: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof209 - } - st_case_209: -//line plugins/parsers/influx/machine.go:26564 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr387 - case 92: - goto st223 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st209 -tr387: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st210 - st210: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof210 - } - st_case_210: -//line plugins/parsers/influx/machine.go:26596 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr351 - case 45: - goto tr389 - case 46: - goto tr390 - case 48: - goto tr391 - case 70: - goto tr110 - case 84: - goto tr111 - case 92: - goto st73 - case 102: - goto tr112 - case 116: - goto tr113 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr392 - } - goto st6 -tr389: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st211 - st211: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof211 - } - st_case_211: -//line plugins/parsers/influx/machine.go:26634 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 46: - goto st212 - case 48: - goto st672 - case 92: - goto st73 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st675 - } - goto st6 -tr390: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st212 - st212: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof212 - } - st_case_212: -//line plugins/parsers/influx/machine.go:26662 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - 
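
tr387 above (m.key = m.text()) is the field-key capture: the machine tokenizes without copying by keeping a token-start mark m.pb and a cursor m.p, and m.text() is, roughly and ignoring escape rewriting, the data[pb:p] span. A stand-alone sketch of that technique; the scanner type and field names here are illustrative, not the real machine:

package main

import "fmt"

// scanner models the zero-copy marks visible in the actions above.
type scanner struct {
	data []byte
	p    int // cursor, advanced by the machine
	pb   int // start of the current token
	key  []byte
}

func (s *scanner) text() []byte { return s.data[s.pb:s.p] }

func main() {
	s := &scanner{data: []byte("temperature=82i")}
	s.p = len("temperature") // cursor stopped on '=' (byte 61)
	s.key = s.text()         // the action at tr387: m.key = m.text()
	s.pb = s.p + 1           // value token starts after '='
	s.p = len(s.data)
	fmt.Printf("%s -> %s\n", s.key, s.text()) // temperature -> 82i
}
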
goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st670 - } - goto st6 - st670: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof670 - } - st_case_670: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st670 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st213: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof213 - } - st_case_213: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr356 - case 43: - goto st214 - case 45: - goto st214 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 - } - goto st6 - st214: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof214 - } - st_case_214: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 - } - goto st6 - st671: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof671 - } - st_case_671: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st672: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof672 - } - st_case_672: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st673: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof673 - } - st_case_673: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st674: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof674 - } - st_case_674: - switch ( m.data)[( m.p)] { - case 10: - goto tr791 - case 13: - goto tr793 - case 32: - goto tr985 - case 34: - goto tr29 - case 44: - goto tr986 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr985 - } - goto st6 - st675: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof675 - } - st_case_675: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st675 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr391: 
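
For readers decoding these switches: the numeric case labels are raw byte values. Printing the recurring ones as characters makes the transitions readable:

package main

import "fmt"

// The bytes this file keeps branching on: tab, LF, VT, FF, CR, space,
// double quote, comma, dot, equals, 'E', backslash, 'e', 'i', 'u'.
func main() {
	for _, b := range []byte{9, 10, 11, 12, 13, 32, 34, 44, 46, 61, 69, 92, 101, 105, 117} {
		fmt.Printf("%3d %q\n", b, b)
	}
}
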
-//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st676 - st676: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof676 - } - st_case_676: -//line plugins/parsers/influx/machine.go:26913 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - case 117: - goto st677 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st677: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof677 - } - st_case_677: - switch ( m.data)[( m.p)] { - case 10: - goto tr797 - case 13: - goto tr799 - case 32: - goto tr988 - case 34: - goto tr29 - case 44: - goto tr989 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr988 - } - goto st6 -tr392: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st678 - st678: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof678 - } - st_case_678: -//line plugins/parsers/influx/machine.go:26981 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - case 117: - goto st677 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st678 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr110: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st679 - st679: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof679 - } - st_case_679: -//line plugins/parsers/influx/machine.go:27026 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 65: - goto st215 - case 92: - goto st73 - case 97: - goto st218 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 - st215: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof215 - } - st_case_215: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 76: - goto st216 - case 92: - goto st73 - } - goto st6 - st216: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof216 - } - st_case_216: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 83: - goto st217 - case 92: - goto st73 - } - goto st6 - st217: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof217 - } - st_case_217: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 69: - goto st680 - case 92: - goto st73 - } - goto st6 - st680: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof680 - } - st_case_680: - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 - st218: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof218 - } - st_case_218: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 108: - goto st219 - } - goto st6 - st219: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof219 - } - st_case_219: - 
switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 115: - goto st220 - } - goto st6 - st220: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof220 - } - st_case_220: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 101: - goto st680 - } - goto st6 -tr111: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st681 - st681: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof681 - } - st_case_681: -//line plugins/parsers/influx/machine.go:27179 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 82: - goto st221 - case 92: - goto st73 - case 114: - goto st222 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 - st221: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof221 - } - st_case_221: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 85: - goto st217 - case 92: - goto st73 - } - goto st6 - st222: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof222 - } - st_case_222: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 117: - goto st220 - } - goto st6 -tr112: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st682 - st682: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof682 - } - st_case_682: -//line plugins/parsers/influx/machine.go:27245 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 92: - goto st73 - case 97: - goto st218 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 -tr113: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st683 - st683: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof683 - } - st_case_683: -//line plugins/parsers/influx/machine.go:27277 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 92: - goto st73 - case 114: - goto st222 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 -tr385: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st223 - st223: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof223 - } - st_case_223: -//line plugins/parsers/influx/machine.go:27309 - switch ( m.data)[( m.p)] { - case 34: - goto st209 - case 92: - goto st209 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 -tr106: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st224 - st224: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof224 - } - st_case_224: -//line plugins/parsers/influx/machine.go:27336 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 46: - goto st225 - case 48: - goto st686 - case 92: - goto st73 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st689 - } - goto st6 -tr107: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st225 - st225: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof225 - } - st_case_225: -//line plugins/parsers/influx/machine.go:27364 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - 
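
st223 above is the backslash detour for field keys, and the quoted-string states take the same detour through st73. From the transitions it appears that inside quoted string values only \" and \\ are rewritten, while any other escaped byte keeps its backslash; a sketch under that assumption (the helper name is made up):

package main

import "fmt"

// unquoteFieldString applies the two escapes the string states appear
// to honor; everything else passes through with the backslash intact.
func unquoteFieldString(s string) string {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' && i+1 < len(s) && (s[i+1] == '"' || s[i+1] == '\\') {
			i++ // drop the backslash, keep the escaped byte
		}
		out = append(out, s[i])
	}
	return string(out)
}

func main() {
	fmt.Println(unquoteFieldString(`say \"hi\"`)) // say "hi"
}
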
goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st684 - } - goto st6 - st684: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof684 - } - st_case_684: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st684 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st226: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof226 - } - st_case_226: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr356 - case 43: - goto st227 - case 45: - goto st227 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - goto st6 - st227: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof227 - } - st_case_227: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - goto st6 - st685: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof685 - } - st_case_685: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st686: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof686 - } - st_case_686: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st687: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof687 - } - st_case_687: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st688: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof688 - } - st_case_688: - switch ( m.data)[( m.p)] { - case 10: - goto tr817 - case 13: - goto tr793 - case 32: - goto tr985 - case 34: - goto tr29 - case 44: - goto tr986 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr985 - } - goto st6 - st689: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof689 - } - st_case_689: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st689 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr108: -//line 
plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st690 - st690: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof690 - } - st_case_690: -//line plugins/parsers/influx/machine.go:27615 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - case 117: - goto st691 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st691: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof691 - } - st_case_691: - switch ( m.data)[( m.p)] { - case 10: - goto tr822 - case 13: - goto tr799 - case 32: - goto tr988 - case 34: - goto tr29 - case 44: - goto tr989 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr988 - } - goto st6 -tr109: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st692 - st692: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof692 - } - st_case_692: -//line plugins/parsers/influx/machine.go:27683 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - case 117: - goto st691 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st692 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr94: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st228 - st228: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof228 - } - st_case_228: -//line plugins/parsers/influx/machine.go:27728 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr94 - case 13: - goto st6 - case 32: - goto st30 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st30 - } - goto tr92 -tr72: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st229 - st229: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof229 - } - st_case_229: -//line plugins/parsers/influx/machine.go:27762 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 46: - goto st230 - case 48: - goto st694 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st697 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 -tr73: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st230 - st230: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof230 - } - st_case_230: -//line plugins/parsers/influx/machine.go:27801 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st693 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 - st693: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof693 - } - st_case_693: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - 
case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st693 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st231: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof231 - } - st_case_231: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 34: - goto st232 - case 44: - goto tr4 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - default: - goto st232 - } - goto st1 - st232: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof232 - } - st_case_232: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 - st694: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof694 - } - st_case_694: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st695: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof695 - } - st_case_695: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st696: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof696 - } - st_case_696: - switch ( m.data)[( m.p)] { - case 10: - goto tr942 - case 11: - goto tr1006 - case 13: - goto tr944 - case 32: - goto tr1005 - case 44: - goto tr1007 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1005 - } - goto st1 - st697: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof697 - } - st_case_697: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st697 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 -tr74: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st698 - st698: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof698 - } - st_case_698: -//line plugins/parsers/influx/machine.go:28059 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: 
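
Worth noting for reviewers: st693..st697 above are a near-copy of st638..st642 and st670..st675 earlier. Ragel instantiates the same value grammar once per surrounding context (bare field value, quoted string, tag side) because each context terminates on different bytes, and that duplication is most of why this generated file is so large. The single-pass design exists because escaping makes naive tokenization wrong; a deliberately naive counter-example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := `weather,location=us temperature=82i,desc="hot" 1465839830100400200`
	// NOTE: naive split; real line protocol allows escaped spaces and
	// commas, which is exactly what breaks this and motivates the machine.
	parts := strings.SplitN(line, " ", 3)
	fmt.Println("series:", parts[0])
	fmt.Println("fields:", parts[1])
	fmt.Println("time:  ", parts[2])
}
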
- goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - case 117: - goto st699 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st699: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof699 - } - st_case_699: - switch ( m.data)[( m.p)] { - case 10: - goto tr948 - case 11: - goto tr1010 - case 13: - goto tr950 - case 32: - goto tr1009 - case 44: - goto tr1011 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1009 - } - goto st1 -tr75: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st700 - st700: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof700 - } - st_case_700: -//line plugins/parsers/influx/machine.go:28127 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - case 117: - goto st699 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st700 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 -tr76: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st701 - st701: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof701 - } - st_case_701: -//line plugins/parsers/influx/machine.go:28172 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 65: - goto st233 - case 92: - goto st94 - case 97: - goto st236 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 - st233: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof233 - } - st_case_233: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 76: - goto st234 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st234: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof234 - } - st_case_234: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 83: - goto st235 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st235: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof235 - } - st_case_235: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 69: - goto st702 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st702: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof702 - } - st_case_702: - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 - st236: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof236 - } - st_case_236: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - 
case 92: - goto st94 - case 108: - goto st237 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st237: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof237 - } - st_case_237: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 115: - goto st238 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st238: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof238 - } - st_case_238: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 101: - goto st702 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 -tr77: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st703 - st703: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof703 - } - st_case_703: -//line plugins/parsers/influx/machine.go:28379 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 82: - goto st239 - case 92: - goto st94 - case 114: - goto st240 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 - st239: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof239 - } - st_case_239: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 85: - goto st235 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st240: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof240 - } - st_case_240: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 117: - goto st238 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 -tr78: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st704 - st704: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof704 - } - st_case_704: -//line plugins/parsers/influx/machine.go:28463 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 92: - goto st94 - case 97: - goto st236 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 -tr79: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st705 - st705: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof705 - } - st_case_705: -//line plugins/parsers/influx/machine.go:28495 - switch ( m.data)[( m.p)] { + st_case_51: +//line plugins/parsers/influx/machine.go:1302 + switch (m.data)[(m.p)] { case 10: - goto tr954 - case 11: - goto tr1014 + goto tr34 case 13: - goto tr956 + goto st9 case 32: - goto tr1013 - case 44: - goto tr1015 - case 92: - goto st94 - case 114: - goto st240 + goto st51 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto st51 } - goto st1 -tr42: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st241 -tr422: - ( m.cs) = 241 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) 
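
From this point the hunk pairs the old emission with the regenerated one: the new output is gofmt-clean ((m.data)[(m.p)] instead of ( m.data)[( m.p)]) and renumbers every state and action wholesale, e.g. the recovery state written as ( m.cs) = 257 on the minus side appears as (m.cs) = 34 on the plus side. Since numbering shifts globally, essentially every line of machine.go churns even though the behavior change lives in plugins/parsers/influx/machine.go.rl; that file, not this one, is the diff worth reading closely. For reference, a sketch of the regeneration hook (the flags are an assumption; the authoritative invocation lives in the repository's build scripts):

// Sketch only: how a Ragel-generated Go file like this is refreshed.
package influx

//go:generate ragel -Z -G2 -o machine.go machine.go.rl
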
- if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st241: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof241 + goto st0 + st52: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof52 } - st_case_241: -//line plugins/parsers/influx/machine.go:28544 - switch ( m.data)[( m.p)] { + st_case_52: + switch (m.data)[(m.p)] { case 10: - goto tr421 - case 11: - goto tr422 + goto tr89 case 13: - goto tr421 + goto tr90 case 32: - goto tr36 - case 44: - goto tr4 - case 61: - goto tr423 - case 92: - goto tr43 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr36 + switch { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st53 + } + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr39 -tr38: - ( m.cs) = 242 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st242: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof242 + goto tr35 + st53: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof53 } - st_case_242: -//line plugins/parsers/influx/machine.go:28587 - switch ( m.data)[( m.p)] { + st_case_53: + switch (m.data)[(m.p)] { case 10: - goto tr421 - case 11: - goto tr422 + goto tr89 case 13: - goto tr421 + goto tr90 case 32: - goto tr36 - case 44: - goto tr4 - case 61: - goto tr31 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr36 - } - goto tr39 -tr462: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st243 - st243: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof243 + goto tr88 } - st_case_243: -//line plugins/parsers/influx/machine.go:28619 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st706 + switch { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st54 + } + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 -tr463: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st706 - st706: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof706 + goto tr35 + st54: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof54 } - st_case_706: -//line plugins/parsers/influx/machine.go:28635 - switch ( m.data)[( m.p)] { + st_case_54: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st707 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st55 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st707: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof707 + goto tr35 + st55: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof55 } - st_case_707: - switch ( m.data)[( m.p)] { + st_case_55: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st708 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st56 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st708: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof708 + goto tr35 + 
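
Both timestamp digit chains here, st706..st724 on the minus side and the interleaved st52..st69 plus side, are bounded for the same reason: the machine accepts at most 19 trailing digits, matching the decimal width of a signed 64-bit nanosecond timestamp (the minus-side chain is 19 states deep). A quick verification of that bound:

package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	max := strconv.FormatInt(math.MaxInt64, 10)
	fmt.Println(max, len(max)) // 9223372036854775807 19
	// One more digit can no longer fit, so the chain has no further
	// digit state to move to and the line is rejected.
	_, err := strconv.ParseInt(max+"0", 10, 64)
	fmt.Println(err) // value out of range
}
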
st56: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof56 } - st_case_708: - switch ( m.data)[( m.p)] { + st_case_56: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st709 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st57 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st709: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof709 + goto tr35 + st57: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof57 } - st_case_709: - switch ( m.data)[( m.p)] { + st_case_57: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st710 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st58 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st710: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof710 + goto tr35 + st58: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof58 } - st_case_710: - switch ( m.data)[( m.p)] { + st_case_58: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st711 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st59 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st711: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof711 + goto tr35 + st59: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof59 } - st_case_711: - switch ( m.data)[( m.p)] { + st_case_59: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st712 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st60 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st712: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof712 + goto tr35 + st60: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof60 } - st_case_712: - switch ( m.data)[( m.p)] { + st_case_60: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st713 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st61 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st713: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof713 + goto tr35 + st61: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof61 } - st_case_713: - switch ( m.data)[( m.p)] { + st_case_61: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st714 + case (m.data)[(m.p)] > 
12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st62 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st714: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof714 + goto tr35 + st62: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof62 } - st_case_714: - switch ( m.data)[( m.p)] { + st_case_62: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st715 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st63 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st715: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof715 + goto tr35 + st63: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof63 } - st_case_715: - switch ( m.data)[( m.p)] { + st_case_63: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st716 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st64 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st716: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof716 + goto tr35 + st64: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof64 } - st_case_716: - switch ( m.data)[( m.p)] { + st_case_64: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st717 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st65 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st717: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof717 + goto tr35 + st65: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof65 } - st_case_717: - switch ( m.data)[( m.p)] { + st_case_65: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st718 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st66 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st718: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof718 + goto tr35 + st66: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof66 } - st_case_718: - switch ( m.data)[( m.p)] { + st_case_66: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st719 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st67 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st719: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof719 + goto tr35 + st67: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof67 } - st_case_719: - switch ( m.data)[( m.p)] { + st_case_67: + switch 
(m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st720 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st68 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st720: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof720 + goto tr35 + st68: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof68 } - st_case_720: - switch ( m.data)[( m.p)] { + st_case_68: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st721 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st69 } - case ( m.data)[( m.p)] >= 9: - goto tr467 + case (m.data)[(m.p)] >= 9: + goto tr88 } - goto tr424 - st721: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof721 + goto tr35 + st69: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof69 } - st_case_721: - switch ( m.data)[( m.p)] { + st_case_69: + switch (m.data)[(m.p)] { case 10: - goto tr468 + goto tr89 case 13: - goto tr470 + goto tr90 case 32: - goto tr467 + goto tr88 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st722 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr88 + } + goto tr35 + tr113: + (m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } + + goto _again + tr120: + (m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st722: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof722 } - st_case_722: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 + + goto _again + tr125: + (m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st723 + + goto _again + tr130: + (m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out } - case ( m.data)[( m.p)] >= 9: - goto tr467 } - goto tr424 - st723: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof723 + + goto _again + st11: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof11 } - st_case_723: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 + st_case_11: +//line plugins/parsers/influx/machine.go:1763 + switch (m.data)[(m.p)] { case 32: - goto tr467 + goto tr7 + case 44: + goto tr7 + case 61: + goto tr7 + case 92: + goto tr8 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st724 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr7 + } + goto tr5 + tr8: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto 
st12 + st12: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof12 } - goto tr424 - st724: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof724 + st_case_12: +//line plugins/parsers/influx/machine.go:1789 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr7 } - st_case_724: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 + goto st3 + tr24: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st13 + st13: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof13 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 + st_case_13: +//line plugins/parsers/influx/machine.go:1805 + switch (m.data)[(m.p)] { + case 34: + goto st6 + case 92: + goto st6 } - goto tr424 -tr15: + goto tr7 + tr13: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st244 - st244: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof244 + goto st14 + st14: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof14 } - st_case_244: -//line plugins/parsers/influx/machine.go:29055 - switch ( m.data)[( m.p)] { + st_case_14: +//line plugins/parsers/influx/machine.go:1824 + switch (m.data)[(m.p)] { case 46: - goto st245 + goto st15 case 48: - goto st726 + goto st72 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st729 + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st75 } - goto tr8 -tr16: + goto tr7 + tr14: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st245 - st245: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof245 + goto st15 + st15: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof15 } - st_case_245: -//line plugins/parsers/influx/machine.go:29077 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st725 + st_case_15: +//line plugins/parsers/influx/machine.go:1846 + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st70 } - goto tr8 - st725: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof725 + goto tr7 + st70: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof70 } - st_case_725: - switch ( m.data)[( m.p)] { + st_case_70: + switch (m.data)[(m.p)] { case 10: - goto tr730 + goto tr111 case 13: - goto tr732 + goto tr112 case 32: - goto tr921 + goto tr110 case 44: - goto tr922 + goto tr113 case 69: - goto st246 + goto st16 case 101: - goto st246 + goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st725 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st70 } - case ( m.data)[( m.p)] >= 9: - goto tr921 + case (m.data)[(m.p)] >= 9: + goto tr110 } - goto tr103 - st246: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof246 + goto tr82 + st16: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof16 } - st_case_246: - switch ( m.data)[( m.p)] { + st_case_16: + switch (m.data)[(m.p)] { case 34: - goto st247 + goto st17 case 43: - goto st247 + goto st17 case 45: - goto st247 + goto st17 + } + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st71 + } + goto tr7 + st17: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof17 + } + st_case_17: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st71 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 + goto tr7 + st71: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof71 } - goto tr8 - st247: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof247 + st_case_71: + switch (m.data)[(m.p)] { + case 10: + goto tr111 + case 13: + goto tr112 + case 32: + goto 
tr110 + case 44: + goto tr113 } - st_case_247: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 + switch { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st71 + } + case (m.data)[(m.p)] >= 9: + goto tr110 } - goto tr8 - st726: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof726 + goto tr82 + st72: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof72 } - st_case_726: - switch ( m.data)[( m.p)] { + st_case_72: + switch (m.data)[(m.p)] { case 10: - goto tr730 + goto tr111 case 13: - goto tr732 + goto tr112 case 32: - goto tr921 + goto tr110 case 44: - goto tr922 + goto tr113 case 46: - goto st725 + goto st70 case 69: - goto st246 + goto st16 case 101: - goto st246 + goto st16 case 105: - goto st728 + goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st727 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st73 } - case ( m.data)[( m.p)] >= 9: - goto tr921 + case (m.data)[(m.p)] >= 9: + goto tr110 } - goto tr103 - st727: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof727 + goto tr82 + st73: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof73 } - st_case_727: - switch ( m.data)[( m.p)] { + st_case_73: + switch (m.data)[(m.p)] { case 10: - goto tr730 + goto tr111 case 13: - goto tr732 + goto tr112 case 32: - goto tr921 + goto tr110 case 44: - goto tr922 + goto tr113 case 46: - goto st725 + goto st70 case 69: - goto st246 + goto st16 case 101: - goto st246 + goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st727 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st73 } - case ( m.data)[( m.p)] >= 9: - goto tr921 + case (m.data)[(m.p)] >= 9: + goto tr110 } - goto tr103 - st728: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof728 + goto tr82 + st74: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof74 } - st_case_728: - switch ( m.data)[( m.p)] { + st_case_74: + switch (m.data)[(m.p)] { case 10: - goto tr942 + goto tr118 case 13: - goto tr944 + goto tr119 case 32: - goto tr1041 + goto tr117 case 44: - goto tr1042 + goto tr120 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1041 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr117 } - goto tr103 - st729: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof729 + goto tr82 + st75: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof75 } - st_case_729: - switch ( m.data)[( m.p)] { + st_case_75: + switch (m.data)[(m.p)] { case 10: - goto tr730 + goto tr111 case 13: - goto tr732 + goto tr112 case 32: - goto tr921 + goto tr110 case 44: - goto tr922 + goto tr113 case 46: - goto st725 + goto st70 case 69: - goto st246 + goto st16 case 101: - goto st246 + goto st16 case 105: - goto st728 + goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st729 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st75 } - case ( m.data)[( m.p)] >= 9: - goto tr921 + case (m.data)[(m.p)] >= 9: + goto tr110 } - goto tr103 -tr17: + goto tr82 + tr15: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st730 - st730: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof730 + goto st76 + st76: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof76 } - st_case_730: -//line plugins/parsers/influx/machine.go:29260 - switch ( m.data)[( m.p)] { + st_case_76: 
+//line plugins/parsers/influx/machine.go:2053 + switch (m.data)[(m.p)] { case 10: - goto tr730 + goto tr111 case 13: - goto tr732 + goto tr112 case 32: - goto tr921 + goto tr110 case 44: - goto tr922 + goto tr113 case 46: - goto st725 + goto st70 case 69: - goto st246 + goto st16 case 101: - goto st246 + goto st16 case 105: - goto st728 + goto st74 case 117: - goto st731 + goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st727 + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st73 } - case ( m.data)[( m.p)] >= 9: - goto tr921 + case (m.data)[(m.p)] >= 9: + goto tr110 } - goto tr103 - st731: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof731 + goto tr82 + st77: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof77 } - st_case_731: - switch ( m.data)[( m.p)] { + st_case_77: + switch (m.data)[(m.p)] { case 10: - goto tr948 + goto tr123 case 13: - goto tr950 + goto tr124 case 32: - goto tr1044 + goto tr122 case 44: - goto tr1045 + goto tr125 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1044 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr122 } - goto tr103 -tr18: + goto tr82 + tr16: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st732 - st732: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof732 + goto st78 + st78: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof78 } - st_case_732: -//line plugins/parsers/influx/machine.go:29320 - switch ( m.data)[( m.p)] { + st_case_78: +//line plugins/parsers/influx/machine.go:2113 + switch (m.data)[(m.p)] { case 10: - goto tr730 + goto tr111 case 13: - goto tr732 + goto tr112 case 32: - goto tr921 + goto tr110 case 44: - goto tr922 + goto tr113 case 46: - goto st725 + goto st70 case 69: - goto st246 + goto st16 case 101: - goto st246 + goto st16 case 105: - goto st728 + goto st74 case 117: - goto st731 + goto st77 + } + switch { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st78 + } + case (m.data)[(m.p)] >= 9: + goto tr110 + } + goto tr82 + tr17: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st79 + st79: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof79 + } + st_case_79: +//line plugins/parsers/influx/machine.go:2154 + switch (m.data)[(m.p)] { + case 10: + goto tr128 + case 13: + goto tr129 + case 32: + goto tr127 + case 44: + goto tr130 + case 65: + goto st18 + case 97: + goto st21 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr127 + } + goto tr82 + st18: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof18 + } + st_case_18: + if (m.data)[(m.p)] == 76 { + goto st19 + } + goto tr7 + st19: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof19 + } + st_case_19: + if (m.data)[(m.p)] == 83 { + goto st20 + } + goto tr7 + st20: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof20 + } + st_case_20: + if (m.data)[(m.p)] == 69 { + goto st80 + } + goto tr7 + st80: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof80 + } + st_case_80: + switch (m.data)[(m.p)] { + case 10: + goto tr128 + case 13: + goto tr129 + case 32: + goto tr127 + case 44: + goto tr130 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr127 + } + goto tr82 + st21: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof21 + } + st_case_21: + if (m.data)[(m.p)] == 108 { + goto st22 + } + goto tr7 + st22: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof22 + } + st_case_22: + if (m.data)[(m.p)] == 115 { + goto st23 + } + goto tr7 + 
st23: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof23 + } + st_case_23: + if (m.data)[(m.p)] == 101 { + goto st80 + } + goto tr7 + tr18: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st81 + st81: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof81 + } + st_case_81: +//line plugins/parsers/influx/machine.go:2257 + switch (m.data)[(m.p)] { + case 10: + goto tr128 + case 13: + goto tr129 + case 32: + goto tr127 + case 44: + goto tr130 + case 82: + goto st24 + case 114: + goto st25 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr127 + } + goto tr82 + st24: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof24 + } + st_case_24: + if (m.data)[(m.p)] == 85 { + goto st20 + } + goto tr7 + st25: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof25 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st732 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 + st_case_25: + if (m.data)[(m.p)] == 117 { + goto st23 } - goto tr103 -tr19: + goto tr7 + tr19: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st733 - st733: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof733 + goto st82 + st82: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof82 } - st_case_733: -//line plugins/parsers/influx/machine.go:29361 - switch ( m.data)[( m.p)] { + st_case_82: +//line plugins/parsers/influx/machine.go:2305 + switch (m.data)[(m.p)] { case 10: - goto tr954 + goto tr128 case 13: - goto tr956 + goto tr129 case 32: - goto tr1047 + goto tr127 case 44: - goto tr1048 - case 65: - goto st248 + goto tr130 case 97: - goto st251 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 - } - goto tr103 - st248: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof248 - } - st_case_248: - if ( m.data)[( m.p)] == 76 { - goto st249 - } - goto tr8 - st249: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof249 - } - st_case_249: - if ( m.data)[( m.p)] == 83 { - goto st250 - } - goto tr8 - st250: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof250 + goto st21 } - st_case_250: - if ( m.data)[( m.p)] == 69 { - goto st734 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr127 } - goto tr8 - st734: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof734 + goto tr82 + tr20: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st83 + st83: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof83 } - st_case_734: - switch ( m.data)[( m.p)] { + st_case_83: +//line plugins/parsers/influx/machine.go:2333 + switch (m.data)[(m.p)] { case 10: - goto tr954 + goto tr128 case 13: - goto tr956 + goto tr129 case 32: - goto tr1047 + goto tr127 case 44: - goto tr1048 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 + goto tr130 + case 114: + goto st25 } - goto tr103 - st251: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof251 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr127 } - st_case_251: - if ( m.data)[( m.p)] == 108 { - goto st252 + goto tr82 + tr3: + (m.cs) = 26 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } } - goto tr8 - st252: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof252 + + goto _again + tr57: + (m.cs) = 26 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- + + (m.cs) = 34 + { + (m.p)++ + goto _out + } } - st_case_252: - if ( m.data)[( m.p)] 
== 115 { - goto st253 + + goto _again + st26: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof26 } - goto tr8 - st253: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof253 + st_case_26: +//line plugins/parsers/influx/machine.go:2381 + switch (m.data)[(m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr49 } - st_case_253: - if ( m.data)[( m.p)] == 101 { - goto st734 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr2 } - goto tr8 -tr20: + goto tr48 + tr48: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st735 - st735: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof735 + goto st27 + st27: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof27 } - st_case_735: -//line plugins/parsers/influx/machine.go:29464 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 13: - goto tr956 + st_case_27: +//line plugins/parsers/influx/machine.go:2407 + switch (m.data)[(m.p)] { case 32: - goto tr1047 + goto tr2 case 44: - goto tr1048 - case 82: - goto st254 - case 114: - goto st255 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 + goto tr2 + case 61: + goto tr51 + case 92: + goto st32 } - goto tr103 - st254: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof254 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr2 } - st_case_254: - if ( m.data)[( m.p)] == 85 { - goto st250 + goto st27 + tr51: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st28 + st28: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof28 } - goto tr8 - st255: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof255 + st_case_28: +//line plugins/parsers/influx/machine.go:2433 + switch (m.data)[(m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr54 } - st_case_255: - if ( m.data)[( m.p)] == 117 { - goto st253 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr2 } - goto tr8 -tr21: + goto tr53 + tr53: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st736 - st736: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof736 + goto st29 + st29: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof29 } - st_case_736: -//line plugins/parsers/influx/machine.go:29512 - switch ( m.data)[( m.p)] { + st_case_29: +//line plugins/parsers/influx/machine.go:2459 + switch (m.data)[(m.p)] { case 10: - goto tr954 + goto tr2 case 13: - goto tr956 + goto tr2 case 32: - goto tr1047 + goto tr56 case 44: - goto tr1048 - case 97: - goto st251 + goto tr57 + case 61: + goto tr2 + case 92: + goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr56 } - goto tr103 -tr22: + goto st29 + tr54: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p + + goto st30 + st30: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof30 + } + st_case_30: +//line plugins/parsers/influx/machine.go:2489 + if (m.data)[(m.p)] == 92 { + goto st31 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr2 + } + goto st29 + st31: +//line plugins/parsers/influx/machine.go.rl:248 + (m.p)-- - goto st737 - st737: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof737 + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof31 } - st_case_737: -//line plugins/parsers/influx/machine.go:29540 - switch ( m.data)[( m.p)] { + st_case_31: +//line plugins/parsers/influx/machine.go:2505 + switch (m.data)[(m.p)] { case 10: - goto tr954 + goto tr2 case 13: - goto tr956 + goto 
tr2 case 32: - goto tr1047 + goto tr56 case 44: - goto tr1048 - case 114: - goto st255 + goto tr57 + case 61: + goto tr2 + case 92: + goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr56 } - goto tr103 -tr9: + goto st29 + tr49: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st256 - st256: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof256 + goto st32 + st32: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof32 } - st_case_256: -//line plugins/parsers/influx/machine.go:29568 - switch ( m.data)[( m.p)] { - case 10: - goto tr8 - case 11: - goto tr9 - case 13: - goto tr8 + st_case_32: +//line plugins/parsers/influx/machine.go:2535 + if (m.data)[(m.p)] == 92 { + goto st33 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr2 + } + goto st27 + st33: +//line plugins/parsers/influx/machine.go.rl:248 + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof33 + } + st_case_33: +//line plugins/parsers/influx/machine.go:2551 + switch (m.data)[(m.p)] { case 32: - goto st2 + goto tr2 case 44: - goto tr8 + goto tr2 case 61: - goto tr12 + goto tr51 case 92: - goto tr10 + goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st2 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr2 } - goto tr6 - st257: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof257 + goto st27 + st34: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof34 } - st_case_257: - if ( m.data)[( m.p)] == 10 { - goto tr438 + st_case_34: + if (m.data)[(m.p)] == 10 { + goto tr62 } - goto st257 -tr438: + goto st34 + tr62: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line //line plugins/parsers/influx/machine.go.rl:78 - {goto st739 } + { + goto st85 + } - goto st738 - st738: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof738 + goto st84 + st84: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof84 } - st_case_738: -//line plugins/parsers/influx/machine.go:29615 + st_case_84: +//line plugins/parsers/influx/machine.go:2592 goto st0 - st260: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof260 + st37: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof37 } - st_case_260: - switch ( m.data)[( m.p)] { + st_case_37: + switch (m.data)[(m.p)] { case 32: - goto tr33 + goto tr31 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr442 + goto tr66 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr33 - } - case ( m.data)[( m.p)] >= 9: - goto tr33 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { + goto tr31 } - goto tr441 -tr441: + goto tr65 + tr65: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st740 - st740: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof740 + goto st86 + st86: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof86 } - st_case_740: -//line plugins/parsers/influx/machine.go:29656 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 + st_case_86: +//line plugins/parsers/influx/machine.go:2628 + switch (m.data)[(m.p)] { case 10: - goto tr1056 - case 12: - goto tr2 + goto tr138 case 13: - goto tr1057 + goto tr139 case 32: goto tr2 case 44: - goto tr1058 + goto tr140 case 92: - goto 
st268 + goto st45 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr2 } - goto st740 -tr443: + goto st86 + tr67: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st741 -tr1056: - ( m.cs) = 741 + goto st87 + tr138: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr1060: - ( m.cs) = 741 + goto _again + tr142: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again - st741: + goto _again + st87: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof741 + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof87 } - st_case_741: -//line plugins/parsers/influx/machine.go:29731 + st_case_87: +//line plugins/parsers/influx/machine.go:2702 goto st0 -tr1057: - ( m.cs) = 261 + tr139: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr1061: - ( m.cs) = 261 + goto _again + tr143: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again - st261: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof261 + goto _again + st38: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof38 } - st_case_261: -//line plugins/parsers/influx/machine.go:29764 - if ( m.data)[( m.p)] == 10 { - goto tr443 + st_case_38: +//line plugins/parsers/influx/machine.go:2735 + if (m.data)[(m.p)] == 10 { + goto tr67 } goto st0 -tr1058: - ( m.cs) = 262 + tr140: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr1062: - ( m.cs) = 262 + goto _again + tr144: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil 
{ - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again - st262: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof262 + goto _again + st39: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof39 } - st_case_262: -//line plugins/parsers/influx/machine.go:29800 - switch ( m.data)[( m.p)] { + st_case_39: +//line plugins/parsers/influx/machine.go:2771 + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -29805,61 +2889,51 @@ tr1062: case 61: goto tr2 case 92: - goto tr445 + goto tr69 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } - goto tr444 -tr444: + goto tr68 + tr68: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st263 - st263: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof263 + goto st40 + st40: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof40 } - st_case_263: -//line plugins/parsers/influx/machine.go:29831 - switch ( m.data)[( m.p)] { + st_case_40: +//line plugins/parsers/influx/machine.go:2797 + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: goto tr2 case 61: - goto tr447 + goto tr71 case 92: - goto st266 + goto st43 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } - goto st263 -tr447: + goto st40 + tr71: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st264 - st264: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof264 + goto st41 + st41: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof41 } - st_case_264: -//line plugins/parsers/influx/machine.go:29862 - switch ( m.data)[( m.p)] { + st_case_41: +//line plugins/parsers/influx/machine.go:2823 + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -29867,1720 +2941,722 @@ tr447: case 61: goto tr2 case 92: - goto tr450 + goto tr74 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } - goto tr449 -tr449: + goto tr73 + tr73: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st742 - st742: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof742 - } - st_case_742: -//line plugins/parsers/influx/machine.go:29893 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 + goto st88 + st88: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof88 + } + st_case_88: +//line plugins/parsers/influx/machine.go:2849 + switch (m.data)[(m.p)] { case 10: - goto tr1060 - case 12: - goto tr2 + goto tr142 case 13: - goto tr1061 + goto tr143 case 32: goto tr2 case 44: - goto tr1062 + goto tr144 case 61: goto tr2 case 92: - goto st265 + goto st42 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr2 } - goto st742 -tr450: + goto st88 + tr74: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st265 - st265: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof265 + goto st42 + st42: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof42 } - st_case_265: -//line plugins/parsers/influx/machine.go:29924 - if ( m.data)[( m.p)] == 92 { - goto st743 + st_case_42: +//line plugins/parsers/influx/machine.go:2879 + if 
(m.data)[(m.p)] == 92 { + goto st89 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } - goto st742 - st743: + goto st88 + st89: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof743 - } - st_case_743: -//line plugins/parsers/influx/machine.go:29945 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof89 + } + st_case_89: +//line plugins/parsers/influx/machine.go:2895 + switch (m.data)[(m.p)] { case 10: - goto tr1060 - case 12: - goto tr2 + goto tr142 case 13: - goto tr1061 + goto tr143 case 32: goto tr2 case 44: - goto tr1062 + goto tr144 case 61: goto tr2 case 92: - goto st265 + goto st42 + } + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto tr2 } - goto st742 -tr445: + goto st88 + tr69: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st266 - st266: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof266 + goto st43 + st43: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof43 } - st_case_266: -//line plugins/parsers/influx/machine.go:29976 - if ( m.data)[( m.p)] == 92 { - goto st267 + st_case_43: +//line plugins/parsers/influx/machine.go:2925 + if (m.data)[(m.p)] == 92 { + goto st44 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } - goto st263 - st267: + goto st40 + st44: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof267 - } - st_case_267: -//line plugins/parsers/influx/machine.go:29997 - switch ( m.data)[( m.p)] { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof44 + } + st_case_44: +//line plugins/parsers/influx/machine.go:2941 + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: goto tr2 case 61: - goto tr447 + goto tr71 case 92: - goto st266 + goto st43 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } - goto st263 -tr442: + goto st40 + tr66: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st268 - st268: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof268 + goto st45 + st45: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof45 } - st_case_268: -//line plugins/parsers/influx/machine.go:30032 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: + st_case_45: +//line plugins/parsers/influx/machine.go:2971 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } - goto st740 -tr439: + goto st86 + tr63: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st739 - st739: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof739 + goto st85 + st85: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof85 } - st_case_739: -//line plugins/parsers/influx/machine.go:30055 - switch ( m.data)[( m.p)] { 
+ st_case_85: +//line plugins/parsers/influx/machine.go:2989 + switch (m.data)[(m.p)] { case 10: - goto tr439 + goto tr63 case 13: - goto st258 + goto st35 case 32: - goto st739 + goto st85 case 35: - goto st259 + goto st36 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st739 + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { + goto st85 } - goto tr1053 - st258: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof258 + goto tr135 + st35: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof35 } - st_case_258: - if ( m.data)[( m.p)] == 10 { - goto tr439 + st_case_35: + if (m.data)[(m.p)] == 10 { + goto tr63 } goto st0 - st259: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof259 + st36: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof36 } - st_case_259: - if ( m.data)[( m.p)] == 10 { - goto tr439 + st_case_36: + if (m.data)[(m.p)] == 10 { + goto tr63 } - goto st259 + goto st36 st_out: - _test_eof269: ( m.cs) = 269; goto _test_eof - _test_eof1: ( m.cs) = 1; goto _test_eof - _test_eof2: ( m.cs) = 2; goto _test_eof - _test_eof3: ( m.cs) = 3; goto _test_eof - _test_eof4: ( m.cs) = 4; goto _test_eof - _test_eof5: ( m.cs) = 5; goto _test_eof - _test_eof6: ( m.cs) = 6; goto _test_eof - _test_eof270: ( m.cs) = 270; goto _test_eof - _test_eof271: ( m.cs) = 271; goto _test_eof - _test_eof272: ( m.cs) = 272; goto _test_eof - _test_eof7: ( m.cs) = 7; goto _test_eof - _test_eof8: ( m.cs) = 8; goto _test_eof - _test_eof9: ( m.cs) = 9; goto _test_eof - _test_eof10: ( m.cs) = 10; goto _test_eof - _test_eof11: ( m.cs) = 11; goto _test_eof - _test_eof12: ( m.cs) = 12; goto _test_eof - _test_eof13: ( m.cs) = 13; goto _test_eof - _test_eof14: ( m.cs) = 14; goto _test_eof - _test_eof15: ( m.cs) = 15; goto _test_eof - _test_eof16: ( m.cs) = 16; goto _test_eof - _test_eof17: ( m.cs) = 17; goto _test_eof - _test_eof18: ( m.cs) = 18; goto _test_eof - _test_eof19: ( m.cs) = 19; goto _test_eof - _test_eof20: ( m.cs) = 20; goto _test_eof - _test_eof21: ( m.cs) = 21; goto _test_eof - _test_eof22: ( m.cs) = 22; goto _test_eof - _test_eof23: ( m.cs) = 23; goto _test_eof - _test_eof24: ( m.cs) = 24; goto _test_eof - _test_eof25: ( m.cs) = 25; goto _test_eof - _test_eof26: ( m.cs) = 26; goto _test_eof - _test_eof27: ( m.cs) = 27; goto _test_eof - _test_eof28: ( m.cs) = 28; goto _test_eof - _test_eof29: ( m.cs) = 29; goto _test_eof - _test_eof30: ( m.cs) = 30; goto _test_eof - _test_eof31: ( m.cs) = 31; goto _test_eof - _test_eof273: ( m.cs) = 273; goto _test_eof - _test_eof274: ( m.cs) = 274; goto _test_eof - _test_eof32: ( m.cs) = 32; goto _test_eof - _test_eof33: ( m.cs) = 33; goto _test_eof - _test_eof275: ( m.cs) = 275; goto _test_eof - _test_eof276: ( m.cs) = 276; goto _test_eof - _test_eof277: ( m.cs) = 277; goto _test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof - _test_eof278: ( m.cs) = 278; goto _test_eof - _test_eof279: ( m.cs) = 279; goto _test_eof - _test_eof280: ( m.cs) = 280; goto _test_eof - _test_eof281: ( m.cs) = 281; goto _test_eof - _test_eof282: ( m.cs) = 282; goto _test_eof - _test_eof283: ( m.cs) = 283; goto _test_eof - _test_eof284: ( m.cs) = 284; goto _test_eof - _test_eof285: ( m.cs) = 285; goto _test_eof - _test_eof286: ( m.cs) = 286; goto _test_eof - _test_eof287: ( m.cs) = 287; goto _test_eof - _test_eof288: ( m.cs) = 288; goto _test_eof - _test_eof289: ( m.cs) = 289; goto _test_eof - _test_eof290: ( m.cs) = 290; goto _test_eof - _test_eof291: ( m.cs) = 291; goto _test_eof - _test_eof292: ( m.cs) = 292; goto _test_eof - _test_eof293: ( m.cs) = 293; goto _test_eof - 
_test_eof294: ( m.cs) = 294; goto _test_eof - _test_eof295: ( m.cs) = 295; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof - _test_eof36: ( m.cs) = 36; goto _test_eof - _test_eof296: ( m.cs) = 296; goto _test_eof - _test_eof297: ( m.cs) = 297; goto _test_eof - _test_eof298: ( m.cs) = 298; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof - _test_eof38: ( m.cs) = 38; goto _test_eof - _test_eof39: ( m.cs) = 39; goto _test_eof - _test_eof40: ( m.cs) = 40; goto _test_eof - _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof299: ( m.cs) = 299; goto _test_eof - _test_eof300: ( m.cs) = 300; goto _test_eof - _test_eof301: ( m.cs) = 301; goto _test_eof - _test_eof302: ( m.cs) = 302; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof - _test_eof303: ( m.cs) = 303; goto _test_eof - _test_eof304: ( m.cs) = 304; goto _test_eof - _test_eof305: ( m.cs) = 305; goto _test_eof - _test_eof306: ( m.cs) = 306; goto _test_eof - _test_eof307: ( m.cs) = 307; goto _test_eof - _test_eof308: ( m.cs) = 308; goto _test_eof - _test_eof309: ( m.cs) = 309; goto _test_eof - _test_eof310: ( m.cs) = 310; goto _test_eof - _test_eof311: ( m.cs) = 311; goto _test_eof - _test_eof312: ( m.cs) = 312; goto _test_eof - _test_eof313: ( m.cs) = 313; goto _test_eof - _test_eof314: ( m.cs) = 314; goto _test_eof - _test_eof315: ( m.cs) = 315; goto _test_eof - _test_eof316: ( m.cs) = 316; goto _test_eof - _test_eof317: ( m.cs) = 317; goto _test_eof - _test_eof318: ( m.cs) = 318; goto _test_eof - _test_eof319: ( m.cs) = 319; goto _test_eof - _test_eof320: ( m.cs) = 320; goto _test_eof - _test_eof321: ( m.cs) = 321; goto _test_eof - _test_eof322: ( m.cs) = 322; goto _test_eof - _test_eof323: ( m.cs) = 323; goto _test_eof - _test_eof324: ( m.cs) = 324; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof - _test_eof44: ( m.cs) = 44; goto _test_eof - _test_eof45: ( m.cs) = 45; goto _test_eof - _test_eof46: ( m.cs) = 46; goto _test_eof - _test_eof47: ( m.cs) = 47; goto _test_eof - _test_eof48: ( m.cs) = 48; goto _test_eof - _test_eof49: ( m.cs) = 49; goto _test_eof - _test_eof50: ( m.cs) = 50; goto _test_eof - _test_eof51: ( m.cs) = 51; goto _test_eof - _test_eof52: ( m.cs) = 52; goto _test_eof - _test_eof325: ( m.cs) = 325; goto _test_eof - _test_eof326: ( m.cs) = 326; goto _test_eof - _test_eof327: ( m.cs) = 327; goto _test_eof - _test_eof53: ( m.cs) = 53; goto _test_eof - _test_eof54: ( m.cs) = 54; goto _test_eof - _test_eof55: ( m.cs) = 55; goto _test_eof - _test_eof56: ( m.cs) = 56; goto _test_eof - _test_eof57: ( m.cs) = 57; goto _test_eof - _test_eof58: ( m.cs) = 58; goto _test_eof - _test_eof328: ( m.cs) = 328; goto _test_eof - _test_eof329: ( m.cs) = 329; goto _test_eof - _test_eof59: ( m.cs) = 59; goto _test_eof - _test_eof330: ( m.cs) = 330; goto _test_eof - _test_eof331: ( m.cs) = 331; goto _test_eof - _test_eof332: ( m.cs) = 332; goto _test_eof - _test_eof333: ( m.cs) = 333; goto _test_eof - _test_eof334: ( m.cs) = 334; goto _test_eof - _test_eof335: ( m.cs) = 335; goto _test_eof - _test_eof336: ( m.cs) = 336; goto _test_eof - _test_eof337: ( m.cs) = 337; goto _test_eof - _test_eof338: ( m.cs) = 338; goto _test_eof - _test_eof339: ( m.cs) = 339; goto _test_eof - _test_eof340: ( m.cs) = 340; goto _test_eof - _test_eof341: ( m.cs) = 341; goto _test_eof - _test_eof342: ( m.cs) = 342; goto _test_eof - _test_eof343: ( m.cs) = 343; goto _test_eof - _test_eof344: ( m.cs) = 344; goto _test_eof - _test_eof345: ( m.cs) = 345; goto _test_eof - _test_eof346: ( m.cs) = 346; goto _test_eof - _test_eof347: ( 
m.cs) = 347; goto _test_eof - _test_eof348: ( m.cs) = 348; goto _test_eof - _test_eof349: ( m.cs) = 349; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof - _test_eof350: ( m.cs) = 350; goto _test_eof - _test_eof351: ( m.cs) = 351; goto _test_eof - _test_eof352: ( m.cs) = 352; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof - _test_eof353: ( m.cs) = 353; goto _test_eof - _test_eof354: ( m.cs) = 354; goto _test_eof - _test_eof355: ( m.cs) = 355; goto _test_eof - _test_eof356: ( m.cs) = 356; goto _test_eof - _test_eof357: ( m.cs) = 357; goto _test_eof - _test_eof358: ( m.cs) = 358; goto _test_eof - _test_eof359: ( m.cs) = 359; goto _test_eof - _test_eof360: ( m.cs) = 360; goto _test_eof - _test_eof361: ( m.cs) = 361; goto _test_eof - _test_eof362: ( m.cs) = 362; goto _test_eof - _test_eof363: ( m.cs) = 363; goto _test_eof - _test_eof364: ( m.cs) = 364; goto _test_eof - _test_eof365: ( m.cs) = 365; goto _test_eof - _test_eof366: ( m.cs) = 366; goto _test_eof - _test_eof367: ( m.cs) = 367; goto _test_eof - _test_eof368: ( m.cs) = 368; goto _test_eof - _test_eof369: ( m.cs) = 369; goto _test_eof - _test_eof370: ( m.cs) = 370; goto _test_eof - _test_eof371: ( m.cs) = 371; goto _test_eof - _test_eof372: ( m.cs) = 372; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof - _test_eof63: ( m.cs) = 63; goto _test_eof - _test_eof64: ( m.cs) = 64; goto _test_eof - _test_eof65: ( m.cs) = 65; goto _test_eof - _test_eof66: ( m.cs) = 66; goto _test_eof - _test_eof373: ( m.cs) = 373; goto _test_eof - _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof68: ( m.cs) = 68; goto _test_eof - _test_eof69: ( m.cs) = 69; goto _test_eof - _test_eof70: ( m.cs) = 70; goto _test_eof - _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof374: ( m.cs) = 374; goto _test_eof - _test_eof375: ( m.cs) = 375; goto _test_eof - _test_eof376: ( m.cs) = 376; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof - _test_eof73: ( m.cs) = 73; goto _test_eof - _test_eof74: ( m.cs) = 74; goto _test_eof - _test_eof377: ( m.cs) = 377; goto _test_eof - _test_eof378: ( m.cs) = 378; goto _test_eof - _test_eof379: ( m.cs) = 379; goto _test_eof - _test_eof75: ( m.cs) = 75; goto _test_eof - _test_eof380: ( m.cs) = 380; goto _test_eof - _test_eof381: ( m.cs) = 381; goto _test_eof - _test_eof382: ( m.cs) = 382; goto _test_eof - _test_eof383: ( m.cs) = 383; goto _test_eof - _test_eof384: ( m.cs) = 384; goto _test_eof - _test_eof385: ( m.cs) = 385; goto _test_eof - _test_eof386: ( m.cs) = 386; goto _test_eof - _test_eof387: ( m.cs) = 387; goto _test_eof - _test_eof388: ( m.cs) = 388; goto _test_eof - _test_eof389: ( m.cs) = 389; goto _test_eof - _test_eof390: ( m.cs) = 390; goto _test_eof - _test_eof391: ( m.cs) = 391; goto _test_eof - _test_eof392: ( m.cs) = 392; goto _test_eof - _test_eof393: ( m.cs) = 393; goto _test_eof - _test_eof394: ( m.cs) = 394; goto _test_eof - _test_eof395: ( m.cs) = 395; goto _test_eof - _test_eof396: ( m.cs) = 396; goto _test_eof - _test_eof397: ( m.cs) = 397; goto _test_eof - _test_eof398: ( m.cs) = 398; goto _test_eof - _test_eof399: ( m.cs) = 399; goto _test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof - _test_eof77: ( m.cs) = 77; goto _test_eof - _test_eof78: ( m.cs) = 78; goto _test_eof - _test_eof79: ( m.cs) = 79; goto _test_eof - _test_eof80: ( m.cs) = 80; goto _test_eof - _test_eof81: ( m.cs) = 81; goto _test_eof - _test_eof82: ( m.cs) = 82; goto _test_eof - _test_eof83: ( m.cs) = 83; goto _test_eof - _test_eof84: ( m.cs) = 84; goto _test_eof - _test_eof85: ( m.cs) = 85; goto 
_test_eof - _test_eof86: ( m.cs) = 86; goto _test_eof - _test_eof87: ( m.cs) = 87; goto _test_eof - _test_eof88: ( m.cs) = 88; goto _test_eof - _test_eof89: ( m.cs) = 89; goto _test_eof - _test_eof400: ( m.cs) = 400; goto _test_eof - _test_eof401: ( m.cs) = 401; goto _test_eof - _test_eof402: ( m.cs) = 402; goto _test_eof - _test_eof403: ( m.cs) = 403; goto _test_eof - _test_eof90: ( m.cs) = 90; goto _test_eof - _test_eof91: ( m.cs) = 91; goto _test_eof - _test_eof92: ( m.cs) = 92; goto _test_eof - _test_eof93: ( m.cs) = 93; goto _test_eof - _test_eof404: ( m.cs) = 404; goto _test_eof - _test_eof405: ( m.cs) = 405; goto _test_eof - _test_eof94: ( m.cs) = 94; goto _test_eof - _test_eof95: ( m.cs) = 95; goto _test_eof - _test_eof406: ( m.cs) = 406; goto _test_eof - _test_eof96: ( m.cs) = 96; goto _test_eof - _test_eof97: ( m.cs) = 97; goto _test_eof - _test_eof407: ( m.cs) = 407; goto _test_eof - _test_eof408: ( m.cs) = 408; goto _test_eof - _test_eof98: ( m.cs) = 98; goto _test_eof - _test_eof409: ( m.cs) = 409; goto _test_eof - _test_eof410: ( m.cs) = 410; goto _test_eof - _test_eof99: ( m.cs) = 99; goto _test_eof - _test_eof100: ( m.cs) = 100; goto _test_eof - _test_eof411: ( m.cs) = 411; goto _test_eof - _test_eof412: ( m.cs) = 412; goto _test_eof - _test_eof413: ( m.cs) = 413; goto _test_eof - _test_eof414: ( m.cs) = 414; goto _test_eof - _test_eof415: ( m.cs) = 415; goto _test_eof - _test_eof416: ( m.cs) = 416; goto _test_eof - _test_eof417: ( m.cs) = 417; goto _test_eof - _test_eof418: ( m.cs) = 418; goto _test_eof - _test_eof419: ( m.cs) = 419; goto _test_eof - _test_eof420: ( m.cs) = 420; goto _test_eof - _test_eof421: ( m.cs) = 421; goto _test_eof - _test_eof422: ( m.cs) = 422; goto _test_eof - _test_eof423: ( m.cs) = 423; goto _test_eof - _test_eof424: ( m.cs) = 424; goto _test_eof - _test_eof425: ( m.cs) = 425; goto _test_eof - _test_eof426: ( m.cs) = 426; goto _test_eof - _test_eof427: ( m.cs) = 427; goto _test_eof - _test_eof428: ( m.cs) = 428; goto _test_eof - _test_eof101: ( m.cs) = 101; goto _test_eof - _test_eof429: ( m.cs) = 429; goto _test_eof - _test_eof430: ( m.cs) = 430; goto _test_eof - _test_eof431: ( m.cs) = 431; goto _test_eof - _test_eof102: ( m.cs) = 102; goto _test_eof - _test_eof103: ( m.cs) = 103; goto _test_eof - _test_eof432: ( m.cs) = 432; goto _test_eof - _test_eof433: ( m.cs) = 433; goto _test_eof - _test_eof434: ( m.cs) = 434; goto _test_eof - _test_eof104: ( m.cs) = 104; goto _test_eof - _test_eof435: ( m.cs) = 435; goto _test_eof - _test_eof436: ( m.cs) = 436; goto _test_eof - _test_eof437: ( m.cs) = 437; goto _test_eof - _test_eof438: ( m.cs) = 438; goto _test_eof - _test_eof439: ( m.cs) = 439; goto _test_eof - _test_eof440: ( m.cs) = 440; goto _test_eof - _test_eof441: ( m.cs) = 441; goto _test_eof - _test_eof442: ( m.cs) = 442; goto _test_eof - _test_eof443: ( m.cs) = 443; goto _test_eof - _test_eof444: ( m.cs) = 444; goto _test_eof - _test_eof445: ( m.cs) = 445; goto _test_eof - _test_eof446: ( m.cs) = 446; goto _test_eof - _test_eof447: ( m.cs) = 447; goto _test_eof - _test_eof448: ( m.cs) = 448; goto _test_eof - _test_eof449: ( m.cs) = 449; goto _test_eof - _test_eof450: ( m.cs) = 450; goto _test_eof - _test_eof451: ( m.cs) = 451; goto _test_eof - _test_eof452: ( m.cs) = 452; goto _test_eof - _test_eof453: ( m.cs) = 453; goto _test_eof - _test_eof454: ( m.cs) = 454; goto _test_eof - _test_eof105: ( m.cs) = 105; goto _test_eof - _test_eof455: ( m.cs) = 455; goto _test_eof - _test_eof456: ( m.cs) = 456; goto _test_eof - _test_eof457: ( m.cs) = 457; 
goto _test_eof - _test_eof458: ( m.cs) = 458; goto _test_eof - _test_eof459: ( m.cs) = 459; goto _test_eof - _test_eof460: ( m.cs) = 460; goto _test_eof - _test_eof461: ( m.cs) = 461; goto _test_eof - _test_eof462: ( m.cs) = 462; goto _test_eof - _test_eof463: ( m.cs) = 463; goto _test_eof - _test_eof464: ( m.cs) = 464; goto _test_eof - _test_eof465: ( m.cs) = 465; goto _test_eof - _test_eof466: ( m.cs) = 466; goto _test_eof - _test_eof467: ( m.cs) = 467; goto _test_eof - _test_eof468: ( m.cs) = 468; goto _test_eof - _test_eof469: ( m.cs) = 469; goto _test_eof - _test_eof470: ( m.cs) = 470; goto _test_eof - _test_eof471: ( m.cs) = 471; goto _test_eof - _test_eof472: ( m.cs) = 472; goto _test_eof - _test_eof473: ( m.cs) = 473; goto _test_eof - _test_eof474: ( m.cs) = 474; goto _test_eof - _test_eof475: ( m.cs) = 475; goto _test_eof - _test_eof476: ( m.cs) = 476; goto _test_eof - _test_eof106: ( m.cs) = 106; goto _test_eof - _test_eof107: ( m.cs) = 107; goto _test_eof - _test_eof108: ( m.cs) = 108; goto _test_eof - _test_eof109: ( m.cs) = 109; goto _test_eof - _test_eof110: ( m.cs) = 110; goto _test_eof - _test_eof477: ( m.cs) = 477; goto _test_eof - _test_eof111: ( m.cs) = 111; goto _test_eof - _test_eof478: ( m.cs) = 478; goto _test_eof - _test_eof479: ( m.cs) = 479; goto _test_eof - _test_eof112: ( m.cs) = 112; goto _test_eof - _test_eof480: ( m.cs) = 480; goto _test_eof - _test_eof481: ( m.cs) = 481; goto _test_eof - _test_eof482: ( m.cs) = 482; goto _test_eof - _test_eof483: ( m.cs) = 483; goto _test_eof - _test_eof484: ( m.cs) = 484; goto _test_eof - _test_eof485: ( m.cs) = 485; goto _test_eof - _test_eof486: ( m.cs) = 486; goto _test_eof - _test_eof487: ( m.cs) = 487; goto _test_eof - _test_eof488: ( m.cs) = 488; goto _test_eof - _test_eof113: ( m.cs) = 113; goto _test_eof - _test_eof114: ( m.cs) = 114; goto _test_eof - _test_eof115: ( m.cs) = 115; goto _test_eof - _test_eof489: ( m.cs) = 489; goto _test_eof - _test_eof116: ( m.cs) = 116; goto _test_eof - _test_eof117: ( m.cs) = 117; goto _test_eof - _test_eof118: ( m.cs) = 118; goto _test_eof - _test_eof490: ( m.cs) = 490; goto _test_eof - _test_eof119: ( m.cs) = 119; goto _test_eof - _test_eof120: ( m.cs) = 120; goto _test_eof - _test_eof491: ( m.cs) = 491; goto _test_eof - _test_eof492: ( m.cs) = 492; goto _test_eof - _test_eof121: ( m.cs) = 121; goto _test_eof - _test_eof122: ( m.cs) = 122; goto _test_eof - _test_eof123: ( m.cs) = 123; goto _test_eof - _test_eof124: ( m.cs) = 124; goto _test_eof - _test_eof493: ( m.cs) = 493; goto _test_eof - _test_eof494: ( m.cs) = 494; goto _test_eof - _test_eof495: ( m.cs) = 495; goto _test_eof - _test_eof125: ( m.cs) = 125; goto _test_eof - _test_eof496: ( m.cs) = 496; goto _test_eof - _test_eof497: ( m.cs) = 497; goto _test_eof - _test_eof498: ( m.cs) = 498; goto _test_eof - _test_eof499: ( m.cs) = 499; goto _test_eof - _test_eof500: ( m.cs) = 500; goto _test_eof - _test_eof501: ( m.cs) = 501; goto _test_eof - _test_eof502: ( m.cs) = 502; goto _test_eof - _test_eof503: ( m.cs) = 503; goto _test_eof - _test_eof504: ( m.cs) = 504; goto _test_eof - _test_eof505: ( m.cs) = 505; goto _test_eof - _test_eof506: ( m.cs) = 506; goto _test_eof - _test_eof507: ( m.cs) = 507; goto _test_eof - _test_eof508: ( m.cs) = 508; goto _test_eof - _test_eof509: ( m.cs) = 509; goto _test_eof - _test_eof510: ( m.cs) = 510; goto _test_eof - _test_eof511: ( m.cs) = 511; goto _test_eof - _test_eof512: ( m.cs) = 512; goto _test_eof - _test_eof513: ( m.cs) = 513; goto _test_eof - _test_eof514: ( m.cs) = 514; goto 
_test_eof - _test_eof515: ( m.cs) = 515; goto _test_eof - _test_eof126: ( m.cs) = 126; goto _test_eof - _test_eof127: ( m.cs) = 127; goto _test_eof - _test_eof516: ( m.cs) = 516; goto _test_eof - _test_eof517: ( m.cs) = 517; goto _test_eof - _test_eof518: ( m.cs) = 518; goto _test_eof - _test_eof519: ( m.cs) = 519; goto _test_eof - _test_eof520: ( m.cs) = 520; goto _test_eof - _test_eof521: ( m.cs) = 521; goto _test_eof - _test_eof522: ( m.cs) = 522; goto _test_eof - _test_eof523: ( m.cs) = 523; goto _test_eof - _test_eof524: ( m.cs) = 524; goto _test_eof - _test_eof128: ( m.cs) = 128; goto _test_eof - _test_eof129: ( m.cs) = 129; goto _test_eof - _test_eof130: ( m.cs) = 130; goto _test_eof - _test_eof525: ( m.cs) = 525; goto _test_eof - _test_eof131: ( m.cs) = 131; goto _test_eof - _test_eof132: ( m.cs) = 132; goto _test_eof - _test_eof133: ( m.cs) = 133; goto _test_eof - _test_eof526: ( m.cs) = 526; goto _test_eof - _test_eof134: ( m.cs) = 134; goto _test_eof - _test_eof135: ( m.cs) = 135; goto _test_eof - _test_eof527: ( m.cs) = 527; goto _test_eof - _test_eof528: ( m.cs) = 528; goto _test_eof - _test_eof136: ( m.cs) = 136; goto _test_eof - _test_eof137: ( m.cs) = 137; goto _test_eof - _test_eof138: ( m.cs) = 138; goto _test_eof - _test_eof529: ( m.cs) = 529; goto _test_eof - _test_eof530: ( m.cs) = 530; goto _test_eof - _test_eof139: ( m.cs) = 139; goto _test_eof - _test_eof531: ( m.cs) = 531; goto _test_eof - _test_eof140: ( m.cs) = 140; goto _test_eof - _test_eof532: ( m.cs) = 532; goto _test_eof - _test_eof533: ( m.cs) = 533; goto _test_eof - _test_eof534: ( m.cs) = 534; goto _test_eof - _test_eof535: ( m.cs) = 535; goto _test_eof - _test_eof536: ( m.cs) = 536; goto _test_eof - _test_eof537: ( m.cs) = 537; goto _test_eof - _test_eof538: ( m.cs) = 538; goto _test_eof - _test_eof539: ( m.cs) = 539; goto _test_eof - _test_eof141: ( m.cs) = 141; goto _test_eof - _test_eof142: ( m.cs) = 142; goto _test_eof - _test_eof143: ( m.cs) = 143; goto _test_eof - _test_eof540: ( m.cs) = 540; goto _test_eof - _test_eof144: ( m.cs) = 144; goto _test_eof - _test_eof145: ( m.cs) = 145; goto _test_eof - _test_eof146: ( m.cs) = 146; goto _test_eof - _test_eof541: ( m.cs) = 541; goto _test_eof - _test_eof147: ( m.cs) = 147; goto _test_eof - _test_eof148: ( m.cs) = 148; goto _test_eof - _test_eof542: ( m.cs) = 542; goto _test_eof - _test_eof543: ( m.cs) = 543; goto _test_eof - _test_eof544: ( m.cs) = 544; goto _test_eof - _test_eof545: ( m.cs) = 545; goto _test_eof - _test_eof546: ( m.cs) = 546; goto _test_eof - _test_eof547: ( m.cs) = 547; goto _test_eof - _test_eof548: ( m.cs) = 548; goto _test_eof - _test_eof549: ( m.cs) = 549; goto _test_eof - _test_eof550: ( m.cs) = 550; goto _test_eof - _test_eof551: ( m.cs) = 551; goto _test_eof - _test_eof552: ( m.cs) = 552; goto _test_eof - _test_eof553: ( m.cs) = 553; goto _test_eof - _test_eof554: ( m.cs) = 554; goto _test_eof - _test_eof555: ( m.cs) = 555; goto _test_eof - _test_eof556: ( m.cs) = 556; goto _test_eof - _test_eof557: ( m.cs) = 557; goto _test_eof - _test_eof558: ( m.cs) = 558; goto _test_eof - _test_eof559: ( m.cs) = 559; goto _test_eof - _test_eof560: ( m.cs) = 560; goto _test_eof - _test_eof561: ( m.cs) = 561; goto _test_eof - _test_eof149: ( m.cs) = 149; goto _test_eof - _test_eof150: ( m.cs) = 150; goto _test_eof - _test_eof562: ( m.cs) = 562; goto _test_eof - _test_eof563: ( m.cs) = 563; goto _test_eof - _test_eof564: ( m.cs) = 564; goto _test_eof - _test_eof151: ( m.cs) = 151; goto _test_eof - _test_eof565: ( m.cs) = 565; goto _test_eof - 
_test_eof566: ( m.cs) = 566; goto _test_eof - _test_eof152: ( m.cs) = 152; goto _test_eof - _test_eof567: ( m.cs) = 567; goto _test_eof - _test_eof568: ( m.cs) = 568; goto _test_eof - _test_eof569: ( m.cs) = 569; goto _test_eof - _test_eof570: ( m.cs) = 570; goto _test_eof - _test_eof571: ( m.cs) = 571; goto _test_eof - _test_eof572: ( m.cs) = 572; goto _test_eof - _test_eof573: ( m.cs) = 573; goto _test_eof - _test_eof574: ( m.cs) = 574; goto _test_eof - _test_eof575: ( m.cs) = 575; goto _test_eof - _test_eof576: ( m.cs) = 576; goto _test_eof - _test_eof577: ( m.cs) = 577; goto _test_eof - _test_eof578: ( m.cs) = 578; goto _test_eof - _test_eof579: ( m.cs) = 579; goto _test_eof - _test_eof580: ( m.cs) = 580; goto _test_eof - _test_eof581: ( m.cs) = 581; goto _test_eof - _test_eof582: ( m.cs) = 582; goto _test_eof - _test_eof583: ( m.cs) = 583; goto _test_eof - _test_eof584: ( m.cs) = 584; goto _test_eof - _test_eof153: ( m.cs) = 153; goto _test_eof - _test_eof154: ( m.cs) = 154; goto _test_eof - _test_eof585: ( m.cs) = 585; goto _test_eof - _test_eof155: ( m.cs) = 155; goto _test_eof - _test_eof586: ( m.cs) = 586; goto _test_eof - _test_eof587: ( m.cs) = 587; goto _test_eof - _test_eof588: ( m.cs) = 588; goto _test_eof - _test_eof589: ( m.cs) = 589; goto _test_eof - _test_eof590: ( m.cs) = 590; goto _test_eof - _test_eof591: ( m.cs) = 591; goto _test_eof - _test_eof592: ( m.cs) = 592; goto _test_eof - _test_eof593: ( m.cs) = 593; goto _test_eof - _test_eof156: ( m.cs) = 156; goto _test_eof - _test_eof157: ( m.cs) = 157; goto _test_eof - _test_eof158: ( m.cs) = 158; goto _test_eof - _test_eof594: ( m.cs) = 594; goto _test_eof - _test_eof159: ( m.cs) = 159; goto _test_eof - _test_eof160: ( m.cs) = 160; goto _test_eof - _test_eof161: ( m.cs) = 161; goto _test_eof - _test_eof595: ( m.cs) = 595; goto _test_eof - _test_eof162: ( m.cs) = 162; goto _test_eof - _test_eof163: ( m.cs) = 163; goto _test_eof - _test_eof596: ( m.cs) = 596; goto _test_eof - _test_eof597: ( m.cs) = 597; goto _test_eof - _test_eof164: ( m.cs) = 164; goto _test_eof - _test_eof165: ( m.cs) = 165; goto _test_eof - _test_eof166: ( m.cs) = 166; goto _test_eof - _test_eof167: ( m.cs) = 167; goto _test_eof - _test_eof168: ( m.cs) = 168; goto _test_eof - _test_eof169: ( m.cs) = 169; goto _test_eof - _test_eof598: ( m.cs) = 598; goto _test_eof - _test_eof599: ( m.cs) = 599; goto _test_eof - _test_eof600: ( m.cs) = 600; goto _test_eof - _test_eof601: ( m.cs) = 601; goto _test_eof - _test_eof602: ( m.cs) = 602; goto _test_eof - _test_eof603: ( m.cs) = 603; goto _test_eof - _test_eof604: ( m.cs) = 604; goto _test_eof - _test_eof605: ( m.cs) = 605; goto _test_eof - _test_eof606: ( m.cs) = 606; goto _test_eof - _test_eof607: ( m.cs) = 607; goto _test_eof - _test_eof608: ( m.cs) = 608; goto _test_eof - _test_eof609: ( m.cs) = 609; goto _test_eof - _test_eof610: ( m.cs) = 610; goto _test_eof - _test_eof611: ( m.cs) = 611; goto _test_eof - _test_eof612: ( m.cs) = 612; goto _test_eof - _test_eof613: ( m.cs) = 613; goto _test_eof - _test_eof614: ( m.cs) = 614; goto _test_eof - _test_eof615: ( m.cs) = 615; goto _test_eof - _test_eof616: ( m.cs) = 616; goto _test_eof - _test_eof170: ( m.cs) = 170; goto _test_eof - _test_eof171: ( m.cs) = 171; goto _test_eof - _test_eof172: ( m.cs) = 172; goto _test_eof - _test_eof617: ( m.cs) = 617; goto _test_eof - _test_eof618: ( m.cs) = 618; goto _test_eof - _test_eof619: ( m.cs) = 619; goto _test_eof - _test_eof173: ( m.cs) = 173; goto _test_eof - _test_eof620: ( m.cs) = 620; goto _test_eof - 
_test_eof621: ( m.cs) = 621; goto _test_eof - _test_eof174: ( m.cs) = 174; goto _test_eof - _test_eof622: ( m.cs) = 622; goto _test_eof - _test_eof623: ( m.cs) = 623; goto _test_eof - _test_eof624: ( m.cs) = 624; goto _test_eof - _test_eof625: ( m.cs) = 625; goto _test_eof - _test_eof626: ( m.cs) = 626; goto _test_eof - _test_eof175: ( m.cs) = 175; goto _test_eof - _test_eof176: ( m.cs) = 176; goto _test_eof - _test_eof177: ( m.cs) = 177; goto _test_eof - _test_eof627: ( m.cs) = 627; goto _test_eof - _test_eof178: ( m.cs) = 178; goto _test_eof - _test_eof179: ( m.cs) = 179; goto _test_eof - _test_eof180: ( m.cs) = 180; goto _test_eof - _test_eof628: ( m.cs) = 628; goto _test_eof - _test_eof181: ( m.cs) = 181; goto _test_eof - _test_eof182: ( m.cs) = 182; goto _test_eof - _test_eof629: ( m.cs) = 629; goto _test_eof - _test_eof630: ( m.cs) = 630; goto _test_eof - _test_eof183: ( m.cs) = 183; goto _test_eof - _test_eof631: ( m.cs) = 631; goto _test_eof - _test_eof632: ( m.cs) = 632; goto _test_eof - _test_eof633: ( m.cs) = 633; goto _test_eof - _test_eof184: ( m.cs) = 184; goto _test_eof - _test_eof185: ( m.cs) = 185; goto _test_eof - _test_eof186: ( m.cs) = 186; goto _test_eof - _test_eof634: ( m.cs) = 634; goto _test_eof - _test_eof187: ( m.cs) = 187; goto _test_eof - _test_eof188: ( m.cs) = 188; goto _test_eof - _test_eof189: ( m.cs) = 189; goto _test_eof - _test_eof635: ( m.cs) = 635; goto _test_eof - _test_eof190: ( m.cs) = 190; goto _test_eof - _test_eof191: ( m.cs) = 191; goto _test_eof - _test_eof636: ( m.cs) = 636; goto _test_eof - _test_eof637: ( m.cs) = 637; goto _test_eof - _test_eof192: ( m.cs) = 192; goto _test_eof - _test_eof193: ( m.cs) = 193; goto _test_eof - _test_eof194: ( m.cs) = 194; goto _test_eof - _test_eof638: ( m.cs) = 638; goto _test_eof - _test_eof195: ( m.cs) = 195; goto _test_eof - _test_eof196: ( m.cs) = 196; goto _test_eof - _test_eof639: ( m.cs) = 639; goto _test_eof - _test_eof640: ( m.cs) = 640; goto _test_eof - _test_eof641: ( m.cs) = 641; goto _test_eof - _test_eof642: ( m.cs) = 642; goto _test_eof - _test_eof643: ( m.cs) = 643; goto _test_eof - _test_eof644: ( m.cs) = 644; goto _test_eof - _test_eof645: ( m.cs) = 645; goto _test_eof - _test_eof646: ( m.cs) = 646; goto _test_eof - _test_eof197: ( m.cs) = 197; goto _test_eof - _test_eof198: ( m.cs) = 198; goto _test_eof - _test_eof199: ( m.cs) = 199; goto _test_eof - _test_eof647: ( m.cs) = 647; goto _test_eof - _test_eof200: ( m.cs) = 200; goto _test_eof - _test_eof201: ( m.cs) = 201; goto _test_eof - _test_eof202: ( m.cs) = 202; goto _test_eof - _test_eof648: ( m.cs) = 648; goto _test_eof - _test_eof203: ( m.cs) = 203; goto _test_eof - _test_eof204: ( m.cs) = 204; goto _test_eof - _test_eof649: ( m.cs) = 649; goto _test_eof - _test_eof650: ( m.cs) = 650; goto _test_eof - _test_eof205: ( m.cs) = 205; goto _test_eof - _test_eof206: ( m.cs) = 206; goto _test_eof - _test_eof207: ( m.cs) = 207; goto _test_eof - _test_eof651: ( m.cs) = 651; goto _test_eof - _test_eof652: ( m.cs) = 652; goto _test_eof - _test_eof653: ( m.cs) = 653; goto _test_eof - _test_eof654: ( m.cs) = 654; goto _test_eof - _test_eof655: ( m.cs) = 655; goto _test_eof - _test_eof656: ( m.cs) = 656; goto _test_eof - _test_eof657: ( m.cs) = 657; goto _test_eof - _test_eof658: ( m.cs) = 658; goto _test_eof - _test_eof659: ( m.cs) = 659; goto _test_eof - _test_eof660: ( m.cs) = 660; goto _test_eof - _test_eof661: ( m.cs) = 661; goto _test_eof - _test_eof662: ( m.cs) = 662; goto _test_eof - _test_eof663: ( m.cs) = 663; goto _test_eof - 
_test_eof664: ( m.cs) = 664; goto _test_eof - _test_eof665: ( m.cs) = 665; goto _test_eof - _test_eof666: ( m.cs) = 666; goto _test_eof - _test_eof667: ( m.cs) = 667; goto _test_eof - _test_eof668: ( m.cs) = 668; goto _test_eof - _test_eof669: ( m.cs) = 669; goto _test_eof - _test_eof208: ( m.cs) = 208; goto _test_eof - _test_eof209: ( m.cs) = 209; goto _test_eof - _test_eof210: ( m.cs) = 210; goto _test_eof - _test_eof211: ( m.cs) = 211; goto _test_eof - _test_eof212: ( m.cs) = 212; goto _test_eof - _test_eof670: ( m.cs) = 670; goto _test_eof - _test_eof213: ( m.cs) = 213; goto _test_eof - _test_eof214: ( m.cs) = 214; goto _test_eof - _test_eof671: ( m.cs) = 671; goto _test_eof - _test_eof672: ( m.cs) = 672; goto _test_eof - _test_eof673: ( m.cs) = 673; goto _test_eof - _test_eof674: ( m.cs) = 674; goto _test_eof - _test_eof675: ( m.cs) = 675; goto _test_eof - _test_eof676: ( m.cs) = 676; goto _test_eof - _test_eof677: ( m.cs) = 677; goto _test_eof - _test_eof678: ( m.cs) = 678; goto _test_eof - _test_eof679: ( m.cs) = 679; goto _test_eof - _test_eof215: ( m.cs) = 215; goto _test_eof - _test_eof216: ( m.cs) = 216; goto _test_eof - _test_eof217: ( m.cs) = 217; goto _test_eof - _test_eof680: ( m.cs) = 680; goto _test_eof - _test_eof218: ( m.cs) = 218; goto _test_eof - _test_eof219: ( m.cs) = 219; goto _test_eof - _test_eof220: ( m.cs) = 220; goto _test_eof - _test_eof681: ( m.cs) = 681; goto _test_eof - _test_eof221: ( m.cs) = 221; goto _test_eof - _test_eof222: ( m.cs) = 222; goto _test_eof - _test_eof682: ( m.cs) = 682; goto _test_eof - _test_eof683: ( m.cs) = 683; goto _test_eof - _test_eof223: ( m.cs) = 223; goto _test_eof - _test_eof224: ( m.cs) = 224; goto _test_eof - _test_eof225: ( m.cs) = 225; goto _test_eof - _test_eof684: ( m.cs) = 684; goto _test_eof - _test_eof226: ( m.cs) = 226; goto _test_eof - _test_eof227: ( m.cs) = 227; goto _test_eof - _test_eof685: ( m.cs) = 685; goto _test_eof - _test_eof686: ( m.cs) = 686; goto _test_eof - _test_eof687: ( m.cs) = 687; goto _test_eof - _test_eof688: ( m.cs) = 688; goto _test_eof - _test_eof689: ( m.cs) = 689; goto _test_eof - _test_eof690: ( m.cs) = 690; goto _test_eof - _test_eof691: ( m.cs) = 691; goto _test_eof - _test_eof692: ( m.cs) = 692; goto _test_eof - _test_eof228: ( m.cs) = 228; goto _test_eof - _test_eof229: ( m.cs) = 229; goto _test_eof - _test_eof230: ( m.cs) = 230; goto _test_eof - _test_eof693: ( m.cs) = 693; goto _test_eof - _test_eof231: ( m.cs) = 231; goto _test_eof - _test_eof232: ( m.cs) = 232; goto _test_eof - _test_eof694: ( m.cs) = 694; goto _test_eof - _test_eof695: ( m.cs) = 695; goto _test_eof - _test_eof696: ( m.cs) = 696; goto _test_eof - _test_eof697: ( m.cs) = 697; goto _test_eof - _test_eof698: ( m.cs) = 698; goto _test_eof - _test_eof699: ( m.cs) = 699; goto _test_eof - _test_eof700: ( m.cs) = 700; goto _test_eof - _test_eof701: ( m.cs) = 701; goto _test_eof - _test_eof233: ( m.cs) = 233; goto _test_eof - _test_eof234: ( m.cs) = 234; goto _test_eof - _test_eof235: ( m.cs) = 235; goto _test_eof - _test_eof702: ( m.cs) = 702; goto _test_eof - _test_eof236: ( m.cs) = 236; goto _test_eof - _test_eof237: ( m.cs) = 237; goto _test_eof - _test_eof238: ( m.cs) = 238; goto _test_eof - _test_eof703: ( m.cs) = 703; goto _test_eof - _test_eof239: ( m.cs) = 239; goto _test_eof - _test_eof240: ( m.cs) = 240; goto _test_eof - _test_eof704: ( m.cs) = 704; goto _test_eof - _test_eof705: ( m.cs) = 705; goto _test_eof - _test_eof241: ( m.cs) = 241; goto _test_eof - _test_eof242: ( m.cs) = 242; goto _test_eof - 
_test_eof243: ( m.cs) = 243; goto _test_eof - _test_eof706: ( m.cs) = 706; goto _test_eof - _test_eof707: ( m.cs) = 707; goto _test_eof - _test_eof708: ( m.cs) = 708; goto _test_eof - _test_eof709: ( m.cs) = 709; goto _test_eof - _test_eof710: ( m.cs) = 710; goto _test_eof - _test_eof711: ( m.cs) = 711; goto _test_eof - _test_eof712: ( m.cs) = 712; goto _test_eof - _test_eof713: ( m.cs) = 713; goto _test_eof - _test_eof714: ( m.cs) = 714; goto _test_eof - _test_eof715: ( m.cs) = 715; goto _test_eof - _test_eof716: ( m.cs) = 716; goto _test_eof - _test_eof717: ( m.cs) = 717; goto _test_eof - _test_eof718: ( m.cs) = 718; goto _test_eof - _test_eof719: ( m.cs) = 719; goto _test_eof - _test_eof720: ( m.cs) = 720; goto _test_eof - _test_eof721: ( m.cs) = 721; goto _test_eof - _test_eof722: ( m.cs) = 722; goto _test_eof - _test_eof723: ( m.cs) = 723; goto _test_eof - _test_eof724: ( m.cs) = 724; goto _test_eof - _test_eof244: ( m.cs) = 244; goto _test_eof - _test_eof245: ( m.cs) = 245; goto _test_eof - _test_eof725: ( m.cs) = 725; goto _test_eof - _test_eof246: ( m.cs) = 246; goto _test_eof - _test_eof247: ( m.cs) = 247; goto _test_eof - _test_eof726: ( m.cs) = 726; goto _test_eof - _test_eof727: ( m.cs) = 727; goto _test_eof - _test_eof728: ( m.cs) = 728; goto _test_eof - _test_eof729: ( m.cs) = 729; goto _test_eof - _test_eof730: ( m.cs) = 730; goto _test_eof - _test_eof731: ( m.cs) = 731; goto _test_eof - _test_eof732: ( m.cs) = 732; goto _test_eof - _test_eof733: ( m.cs) = 733; goto _test_eof - _test_eof248: ( m.cs) = 248; goto _test_eof - _test_eof249: ( m.cs) = 249; goto _test_eof - _test_eof250: ( m.cs) = 250; goto _test_eof - _test_eof734: ( m.cs) = 734; goto _test_eof - _test_eof251: ( m.cs) = 251; goto _test_eof - _test_eof252: ( m.cs) = 252; goto _test_eof - _test_eof253: ( m.cs) = 253; goto _test_eof - _test_eof735: ( m.cs) = 735; goto _test_eof - _test_eof254: ( m.cs) = 254; goto _test_eof - _test_eof255: ( m.cs) = 255; goto _test_eof - _test_eof736: ( m.cs) = 736; goto _test_eof - _test_eof737: ( m.cs) = 737; goto _test_eof - _test_eof256: ( m.cs) = 256; goto _test_eof - _test_eof257: ( m.cs) = 257; goto _test_eof - _test_eof738: ( m.cs) = 738; goto _test_eof - _test_eof260: ( m.cs) = 260; goto _test_eof - _test_eof740: ( m.cs) = 740; goto _test_eof - _test_eof741: ( m.cs) = 741; goto _test_eof - _test_eof261: ( m.cs) = 261; goto _test_eof - _test_eof262: ( m.cs) = 262; goto _test_eof - _test_eof263: ( m.cs) = 263; goto _test_eof - _test_eof264: ( m.cs) = 264; goto _test_eof - _test_eof742: ( m.cs) = 742; goto _test_eof - _test_eof265: ( m.cs) = 265; goto _test_eof - _test_eof743: ( m.cs) = 743; goto _test_eof - _test_eof266: ( m.cs) = 266; goto _test_eof - _test_eof267: ( m.cs) = 267; goto _test_eof - _test_eof268: ( m.cs) = 268; goto _test_eof - _test_eof739: ( m.cs) = 739; goto _test_eof - _test_eof258: ( m.cs) = 258; goto _test_eof - _test_eof259: ( m.cs) = 259; goto _test_eof - - _test_eof: {} - if ( m.p) == ( m.eof) { - switch ( m.cs) { - case 7, 260: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 2, 3, 4, 5, 6, 27, 30, 31, 34, 35, 36, 48, 49, 50, 51, 52, 72, 73, 75, 92, 102, 104, 140, 152, 155, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256: 
-//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 12, 13, 14, 21, 23, 24, 262, 263, 264, 265, 266, 267: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 243: -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 740: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 742, 743: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 270, 271, 272, 273, 274, 276, 277, 296, 297, 298, 300, 301, 304, 305, 326, 327, 328, 329, 331, 375, 376, 378, 379, 401, 402, 407, 408, 410, 430, 431, 433, 434, 456, 457, 617, 620: -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 9, 37, 39, 164, 166: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 33, 74, 103, 169, 207: -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 19, 43, 44, 45, 57, 58, 60, 62, 67, 69, 70, 76, 77, 78, 83, 85, 87, 88, 96, 97, 99, 100, 101, 106, 107, 108, 121, 122, 136, 137: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 59: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 269: -//line plugins/parsers/influx/machine.go.rl:82 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 1: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 299, 302, 306, 374, 398, 399, 403, 404, 405, 529, 563, 564, 566: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 15, 22: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line 
plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 350, 351, 352, 354, 373, 429, 453, 454, 458, 478, 494, 495, 497: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 623, 674, 688, 728: -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 624, 677, 691, 731: -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 325, 618, 619, 621, 622, 625, 631, 632, 670, 671, 672, 673, 675, 676, 678, 684, 685, 686, 687, 689, 690, 692, 725, 726, 727, 729, 730, 732: -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 626, 627, 628, 629, 630, 633, 634, 635, 636, 637, 679, 680, 681, 682, 683, 733, 734, 735, 736, 737: -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 275, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 330, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 377, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 409, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 432, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724: -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true + _test_eof46: + (m.cs) = 46 + goto _test_eof + _test_eof1: + (m.cs) = 1 + goto _test_eof + _test_eof2: + (m.cs) = 2 + goto _test_eof + _test_eof3: + (m.cs) = 3 + goto _test_eof + _test_eof4: + (m.cs) = 4 + goto _test_eof + _test_eof5: + (m.cs) = 5 + goto _test_eof + _test_eof6: + (m.cs) = 6 + goto _test_eof + _test_eof47: + (m.cs) = 47 + goto _test_eof + _test_eof48: + (m.cs) = 48 + goto _test_eof + _test_eof49: + (m.cs) = 49 + goto _test_eof + _test_eof7: + (m.cs) = 7 + goto _test_eof + _test_eof8: + (m.cs) = 8 + goto _test_eof + _test_eof9: + (m.cs) = 9 + goto _test_eof + _test_eof10: + (m.cs) = 10 + goto _test_eof + _test_eof50: + (m.cs) = 50 + goto _test_eof + _test_eof51: + (m.cs) = 51 + 
goto _test_eof + _test_eof52: + (m.cs) = 52 + goto _test_eof + _test_eof53: + (m.cs) = 53 + goto _test_eof + _test_eof54: + (m.cs) = 54 + goto _test_eof + _test_eof55: + (m.cs) = 55 + goto _test_eof + _test_eof56: + (m.cs) = 56 + goto _test_eof + _test_eof57: + (m.cs) = 57 + goto _test_eof + _test_eof58: + (m.cs) = 58 + goto _test_eof + _test_eof59: + (m.cs) = 59 + goto _test_eof + _test_eof60: + (m.cs) = 60 + goto _test_eof + _test_eof61: + (m.cs) = 61 + goto _test_eof + _test_eof62: + (m.cs) = 62 + goto _test_eof + _test_eof63: + (m.cs) = 63 + goto _test_eof + _test_eof64: + (m.cs) = 64 + goto _test_eof + _test_eof65: + (m.cs) = 65 + goto _test_eof + _test_eof66: + (m.cs) = 66 + goto _test_eof + _test_eof67: + (m.cs) = 67 + goto _test_eof + _test_eof68: + (m.cs) = 68 + goto _test_eof + _test_eof69: + (m.cs) = 69 + goto _test_eof + _test_eof11: + (m.cs) = 11 + goto _test_eof + _test_eof12: + (m.cs) = 12 + goto _test_eof + _test_eof13: + (m.cs) = 13 + goto _test_eof + _test_eof14: + (m.cs) = 14 + goto _test_eof + _test_eof15: + (m.cs) = 15 + goto _test_eof + _test_eof70: + (m.cs) = 70 + goto _test_eof + _test_eof16: + (m.cs) = 16 + goto _test_eof + _test_eof17: + (m.cs) = 17 + goto _test_eof + _test_eof71: + (m.cs) = 71 + goto _test_eof + _test_eof72: + (m.cs) = 72 + goto _test_eof + _test_eof73: + (m.cs) = 73 + goto _test_eof + _test_eof74: + (m.cs) = 74 + goto _test_eof + _test_eof75: + (m.cs) = 75 + goto _test_eof + _test_eof76: + (m.cs) = 76 + goto _test_eof + _test_eof77: + (m.cs) = 77 + goto _test_eof + _test_eof78: + (m.cs) = 78 + goto _test_eof + _test_eof79: + (m.cs) = 79 + goto _test_eof + _test_eof18: + (m.cs) = 18 + goto _test_eof + _test_eof19: + (m.cs) = 19 + goto _test_eof + _test_eof20: + (m.cs) = 20 + goto _test_eof + _test_eof80: + (m.cs) = 80 + goto _test_eof + _test_eof21: + (m.cs) = 21 + goto _test_eof + _test_eof22: + (m.cs) = 22 + goto _test_eof + _test_eof23: + (m.cs) = 23 + goto _test_eof + _test_eof81: + (m.cs) = 81 + goto _test_eof + _test_eof24: + (m.cs) = 24 + goto _test_eof + _test_eof25: + (m.cs) = 25 + goto _test_eof + _test_eof82: + (m.cs) = 82 + goto _test_eof + _test_eof83: + (m.cs) = 83 + goto _test_eof + _test_eof26: + (m.cs) = 26 + goto _test_eof + _test_eof27: + (m.cs) = 27 + goto _test_eof + _test_eof28: + (m.cs) = 28 + goto _test_eof + _test_eof29: + (m.cs) = 29 + goto _test_eof + _test_eof30: + (m.cs) = 30 + goto _test_eof + _test_eof31: + (m.cs) = 31 + goto _test_eof + _test_eof32: + (m.cs) = 32 + goto _test_eof + _test_eof33: + (m.cs) = 33 + goto _test_eof + _test_eof34: + (m.cs) = 34 + goto _test_eof + _test_eof84: + (m.cs) = 84 + goto _test_eof + _test_eof37: + (m.cs) = 37 + goto _test_eof + _test_eof86: + (m.cs) = 86 + goto _test_eof + _test_eof87: + (m.cs) = 87 + goto _test_eof + _test_eof38: + (m.cs) = 38 + goto _test_eof + _test_eof39: + (m.cs) = 39 + goto _test_eof + _test_eof40: + (m.cs) = 40 + goto _test_eof + _test_eof41: + (m.cs) = 41 + goto _test_eof + _test_eof88: + (m.cs) = 88 + goto _test_eof + _test_eof42: + (m.cs) = 42 + goto _test_eof + _test_eof89: + (m.cs) = 89 + goto _test_eof + _test_eof43: + (m.cs) = 43 + goto _test_eof + _test_eof44: + (m.cs) = 44 + goto _test_eof + _test_eof45: + (m.cs) = 45 + goto _test_eof + _test_eof85: + (m.cs) = 85 + goto _test_eof + _test_eof35: + (m.cs) = 35 + goto _test_eof + _test_eof36: + (m.cs) = 36 + goto _test_eof - case 8: + _test_eof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 7, 37: //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 
257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 98: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 10, 11, 25, 26, 28, 29, 40, 41, 53, 54, 55, 56, 71, 90, 91, 93, 95, 138, 139, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 153, 154, 156, 157, 158, 159, 160, 161, 162, 163, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 534, 588, 696: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 537, 591, 699: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 + err = ErrTagParse + (m.p)-- - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + case 10: +//line plugins/parsers/influx/machine.go.rl:53 -//line plugins/parsers/influx/machine.go.rl:178 + err = ErrTimestampParse + (m.p)-- - m.finishMetric = true + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 406, 530, 531, 532, 533, 535, 536, 538, 562, 585, 586, 587, 589, 590, 592, 693, 694, 695, 697, 698, 700: + case 86: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } -//line plugins/parsers/influx/machine.go.rl:130 + case 88, 89: +//line plugins/parsers/influx/machine.go.rl:99 - err = 
m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } + case 47, 48, 49, 51: //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true - - case 539, 540, 541, 542, 543, 593, 594, 595, 596, 597, 701, 702, 703, 704, 705: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 + m.finishMetric = true - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + case 46: +//line plugins/parsers/influx/machine.go.rl:82 - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 303, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 400, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 565, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584: + case 1: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } +//line plugins/parsers/influx/machine.go.rl:46 -//line plugins/parsers/influx/machine.go.rl:178 + err = ErrTagParse + (m.p)-- - m.finishMetric = true + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 16, 17, 18, 20, 46, 47, 63, 64, 65, 66, 68, 79, 80, 81, 82, 84, 86, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 123, 124, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204: + case 29, 31: //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } + err = ErrTagParse + (m.p)-- -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 483, 519, 641: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + case 74: //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true - - case 486, 
522, 644: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + m.finishMetric = true + case 77: //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true - - case 477, 479, 480, 481, 482, 484, 485, 487, 493, 516, 517, 518, 520, 521, 523, 638, 639, 640, 642, 643, 645: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + m.finishMetric = true + case 70, 71, 72, 73, 75, 76, 78: //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true - - case 488, 489, 490, 491, 492, 524, 525, 526, 527, 528, 646, 647, 648, 649, 650: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + m.finishMetric = true + case 79, 80, 81, 82, 83: //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true - - case 353, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 455, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 496, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + m.finishMetric = true + case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true - - case 38, 165, 167, 168, 205, 206, 241, 242: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line 
plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 42, 89, 151: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 61, 105, 125: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- + m.finishMetric = true - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go:31580 +//line plugins/parsers/influx/machine.go:3301 + } } - } - _out: {} + _out: + { + } } //line plugins/parsers/influx/machine.go.rl:415 @@ -31643,7 +3719,7 @@ type streamMachine struct { func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { m := &streamMachine{ machine: NewMachine(handler), - reader: r, + reader: r, } m.machine.SetData(make([]byte, 1024)) @@ -31671,13 +3747,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -31688,6 +3757,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2*len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index f8f40cd7c1dc0..d6b5d949e4065 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -204,7 +204,7 @@ timestamp = ('-'? 
digit{1,19}) >begin %timestamp; fieldkeychar = - [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r] ); + [^\t\n\v\f\r ,=\\] | ( '\\' [^\t\n\v\f\r] ); fieldkey = fieldkeychar+ >begin %fieldkey; @@ -245,7 +245,7 @@ fieldset = field ( ',' field )*; tagchar = - [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r\\] ) | '\\\\' %to{ fhold; }; + [^\t\n\v\f\r ,=\\] | ( '\\' [^\t\n\v\f\r\\] ) | '\\\\' %to{ fhold; }; tagkey = tagchar+ >begin %tagkey; @@ -257,7 +257,7 @@ tagset = ((',' tagkey '=' tagvalue) $err(tagset_error))*; measurement_chars = - [^\t\n\f\r ,\\] | ( '\\' [^\t\n\f\r] ); + [^\t\n\v\f\r ,\\] | ( '\\' [^\t\n\v\f\r] ); measurement_start = measurement_chars - '#'; @@ -499,13 +499,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -516,6 +509,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index de5353da0c446..e8e0357fdb33f 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -169,35 +169,35 @@ func (h *TestingHandler) Results() []Result { type BenchmarkingHandler struct { } -func (h *BenchmarkingHandler) SetMeasurement(name []byte) error { +func (h *BenchmarkingHandler) SetMeasurement(_ []byte) error { return nil } -func (h *BenchmarkingHandler) AddTag(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddTag(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddInt(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddInt(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddUint(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddUint(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddFloat(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddFloat(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddString(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddString(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddBool(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddBool(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) SetTimestamp(tm []byte) error { +func (h *BenchmarkingHandler) SetTimestamp(_ []byte) error { return nil } @@ -1832,7 +1832,7 @@ func BenchmarkMachine(b *testing.B) { } } -func TestMachineProcstat(t *testing.T) { +func TestMachineProcstat(_ *testing.T) { input := []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &TestingHandler{} fsm := influx.NewMachine(handler) @@ -2152,7 +2152,7 @@ func TestStreamMachine(t *testing.T) { for _, tt := range tests { tc = append(tc, testcase{ name: tt.name, - input: bytes.NewBuffer([]byte(tt.input)), + input: bytes.NewBuffer(tt.input), results: tt.results, err: tt.err, }) @@ -2191,7 +2191,7 @@ func TestStreamMachinePosition(t *testing.T) { for _, tt := range positionTests { tc = append(tc, testcase{ name: tt.name, - input: bytes.NewBuffer([]byte(tt.input)), + input: bytes.NewBuffer(tt.input), lineno: tt.lineno, column: tt.column, }) diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index f85435ed54644..adc89f407f4d3 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -82,8 +82,8 @@ func NewSeriesParser(handler *MetricHandler) *Parser { } } -func (h *Parser) SetTimeFunc(f TimeFunc) { - h.handler.SetTimeFunc(f) +func (p *Parser) SetTimeFunc(f TimeFunc) { + p.handler.SetTimeFunc(f) } func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { @@ -178,18 +178,18 @@ func NewStreamParser(r io.Reader) *StreamParser { // SetTimeFunc changes the function used to determine the time of metrics // without a timestamp. The default TimeFunc is time.Now. Useful mostly for // testing, or perhaps if you want all metrics to have the same timestamp. -func (h *StreamParser) SetTimeFunc(f TimeFunc) { - h.handler.SetTimeFunc(f) +func (sp *StreamParser) SetTimeFunc(f TimeFunc) { + sp.handler.SetTimeFunc(f) } -func (h *StreamParser) SetTimePrecision(u time.Duration) { - h.handler.SetTimePrecision(u) +func (sp *StreamParser) SetTimePrecision(u time.Duration) { + sp.handler.SetTimePrecision(u) } // Next parses the next item from the stream. You can repeat calls to this // function if it returns ParseError to get the next metric or error. 
-func (p *StreamParser) Next() (telegraf.Metric, error) {
-	err := p.machine.Next()
+func (sp *StreamParser) Next() (telegraf.Metric, error) {
+	err := sp.machine.Next()
 	if err == EOF {
 		return nil, err
 	}
@@ -200,16 +200,16 @@ func (p *StreamParser) Next() (telegraf.Metric, error) {
 
 	if err != nil {
 		return nil, &ParseError{
-			Offset:     p.machine.Position(),
-			LineOffset: p.machine.LineOffset(),
-			LineNumber: p.machine.LineNumber(),
-			Column:     p.machine.Column(),
+			Offset:     sp.machine.Position(),
+			LineOffset: sp.machine.LineOffset(),
+			LineNumber: sp.machine.LineNumber(),
+			Column:     sp.machine.Column(),
 			msg:        err.Error(),
-			buf:        p.machine.LineText(),
+			buf:        sp.machine.LineText(),
 		}
 	}
 
-	metric, err := p.handler.Metric()
+	metric, err := sp.handler.Metric()
 	if err != nil {
 		return nil, err
 	}
@@ -218,27 +218,27 @@ func (p *StreamParser) Next() (telegraf.Metric, error) {
 }
 
 // Position returns the current byte offset into the data.
-func (p *StreamParser) Position() int {
-	return p.machine.Position()
+func (sp *StreamParser) Position() int {
+	return sp.machine.Position()
 }
 
 // LineOffset returns the byte offset of the current line.
-func (p *StreamParser) LineOffset() int {
-	return p.machine.LineOffset()
+func (sp *StreamParser) LineOffset() int {
+	return sp.machine.LineOffset()
 }
 
 // LineNumber returns the current line number. Lines are counted based on the
 // regular expression `\r?\n`.
-func (p *StreamParser) LineNumber() int {
-	return p.machine.LineNumber()
+func (sp *StreamParser) LineNumber() int {
+	return sp.machine.LineNumber()
 }
 
 // Column returns the current column.
-func (p *StreamParser) Column() int {
-	return p.machine.Column()
+func (sp *StreamParser) Column() int {
+	return sp.machine.Column()
 }
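
The hunks above only rename the StreamParser receiver; the calling pattern is unchanged. For orientation, a minimal consumer looks roughly like the sketch below, assuming only the API visible in this diff (NewStreamParser, Next, the package-level EOF sentinel, and ParseError); the sample input line is illustrative:

    package main

    import (
        "fmt"
        "strings"

        "github.com/influxdata/telegraf/plugins/parsers/influx"
    )

    func main() {
        // One metric in line protocol, consumed as a stream.
        r := strings.NewReader("cpu,host=localhost value=42 1517620624000000000\n")
        sp := influx.NewStreamParser(r)

        for {
            m, err := sp.Next()
            if err == influx.EOF {
                break // input exhausted
            }
            if err != nil {
                // A ParseError reports Position/LineNumber/Column; Next may be called again.
                fmt.Println("parse error:", err)
                continue
            }
            fmt.Println(m.Name(), m.Fields())
        }
    }

 // LineText returns the text of the current line that has been parsed so far.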
-func (p *StreamParser) LineText() string { - return p.machine.LineText() +func (sp *StreamParser) LineText() string { + return sp.machine.LineText() } diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 5c780f070fce5..c5a39801782c1 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -15,13 +15,6 @@ import ( "github.com/stretchr/testify/require" ) -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - var DefaultTime = func() time.Time { return time.Unix(42, 0) } @@ -37,15 +30,13 @@ var ptests = []struct { name: "minimal", input: []byte("cpu value=42 0"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, err: nil, @@ -54,15 +45,13 @@ var ptests = []struct { name: "minimal with newline", input: []byte("cpu value=42 0\n"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, err: nil, @@ -71,15 +60,13 @@ var ptests = []struct { name: "measurement escape space", input: []byte(`c\ pu value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "c pu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "c pu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -88,15 +75,13 @@ var ptests = []struct { name: "measurement escape comma", input: []byte(`c\,pu value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "c,pu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "c,pu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -105,18 +90,16 @@ var ptests = []struct { name: "tags", input: []byte(`cpu,cpu=cpu0,host=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "cpu": "cpu0", - "host": "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "cpu": "cpu0", + "host": "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -125,17 +108,15 @@ var ptests = []struct { name: "tags escape unescapable", input: []byte(`cpu,ho\st=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - `ho\st`: "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + `ho\st`: "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -144,17 +125,15 @@ var ptests = []struct { name: "tags escape equals", input: []byte(`cpu,ho\=st=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "ho=st": "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "ho=st": "localhost", + }, + map[string]interface{}{ + 
"value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -163,17 +142,15 @@ var ptests = []struct { name: "tags escape comma", input: []byte(`cpu,ho\,st=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "ho,st": "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "ho,st": "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -182,17 +159,15 @@ var ptests = []struct { name: "tag value escape space", input: []byte(`cpu,host=two\ words value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "host": "two words", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "host": "two words", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -201,17 +176,15 @@ var ptests = []struct { name: "tag value double escape space", input: []byte(`cpu,host=two\\ words value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "host": `two\ words`, - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "host": `two\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -220,17 +193,15 @@ var ptests = []struct { name: "tag value triple escape space", input: []byte(`cpu,host=two\\\ words value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "host": `two\\ words`, - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "host": `two\\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -239,15 +210,13 @@ var ptests = []struct { name: "field key escape not escapable", input: []byte(`cpu va\lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va\lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va\lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -256,15 +225,13 @@ var ptests = []struct { name: "field key escape equals", input: []byte(`cpu va\=lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va=lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va=lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -273,15 +240,13 @@ var ptests = []struct { name: "field key escape comma", input: []byte(`cpu va\,lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va,lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va,lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -290,15 +255,13 @@ var ptests = []struct { name: "field key escape space", input: []byte(`cpu va\ lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -307,15 
+270,13 @@ var ptests = []struct { name: "field int", input: []byte("cpu value=42i"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(42, 0), ), }, err: nil, @@ -336,15 +297,13 @@ var ptests = []struct { name: "field int max value", input: []byte("cpu value=9223372036854775807i"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": int64(9223372036854775807), - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": int64(9223372036854775807), + }, + time.Unix(42, 0), ), }, err: nil, @@ -353,15 +312,13 @@ var ptests = []struct { name: "field uint", input: []byte("cpu value=42u"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(42), - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(42), + }, + time.Unix(42, 0), ), }, err: nil, @@ -382,15 +339,13 @@ var ptests = []struct { name: "field uint max value", input: []byte("cpu value=18446744073709551615u"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(18446744073709551615), - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(18446744073709551615), + }, + time.Unix(42, 0), ), }, err: nil, @@ -399,15 +354,13 @@ var ptests = []struct { name: "field boolean", input: []byte("cpu value=true"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": true, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": true, + }, + time.Unix(42, 0), ), }, err: nil, @@ -416,15 +369,13 @@ var ptests = []struct { name: "field string", input: []byte(`cpu value="42"`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "42", - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "42", + }, + time.Unix(42, 0), ), }, err: nil, @@ -433,15 +384,13 @@ var ptests = []struct { name: "field string escape quote", input: []byte(`cpu value="how\"dy"`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `value`: `how"dy`, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `value`: `how"dy`, + }, + time.Unix(42, 0), ), }, err: nil, @@ -450,15 +399,13 @@ var ptests = []struct { name: "field string escape backslash", input: []byte(`cpu value="how\\dy"`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `value`: `how\dy`, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `value`: `how\dy`, + }, + time.Unix(42, 0), ), }, err: nil, @@ -467,15 +414,13 @@ var ptests = []struct { name: "field string newline", input: []byte("cpu value=\"4\n2\""), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "4\n2", - }, - time.Unix(42, 
0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "4\n2", + }, + time.Unix(42, 0), ), }, err: nil, @@ -484,15 +429,13 @@ var ptests = []struct { name: "no timestamp", input: []byte("cpu value=42"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -504,15 +447,13 @@ var ptests = []struct { return time.Unix(42, 123456789) }, metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 123456789), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 123456789), ), }, err: nil, @@ -521,25 +462,21 @@ var ptests = []struct { name: "multiple lines", input: []byte("cpu value=42\ncpu value=42"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -560,69 +497,67 @@ var ptests = []struct { name: "procstat", input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "procstat", - map[string]string{ - "exe": "bash", - "process_name": "bash", - }, - map[string]interface{}{ - "cpu_time": 0, - "cpu_time_guest": float64(0), - "cpu_time_guest_nice": float64(0), - "cpu_time_idle": float64(0), - "cpu_time_iowait": float64(0), - "cpu_time_irq": float64(0), - "cpu_time_nice": float64(0), - "cpu_time_soft_irq": float64(0), - "cpu_time_steal": float64(0), - "cpu_time_system": float64(0), - "cpu_time_user": float64(0.02), - "cpu_usage": float64(0), - "involuntary_context_switches": 2, - 
"memory_data": 1576960, - "memory_locked": 0, - "memory_rss": 5103616, - "memory_stack": 139264, - "memory_swap": 0, - "memory_vms": 21659648, - "nice_priority": 20, - "num_fds": 4, - "num_threads": 1, - "pid": 29417, - "read_bytes": 0, - "read_count": 259, - "realtime_priority": 0, - "rlimit_cpu_time_hard": 2147483647, - "rlimit_cpu_time_soft": 2147483647, - "rlimit_file_locks_hard": 2147483647, - "rlimit_file_locks_soft": 2147483647, - "rlimit_memory_data_hard": 2147483647, - "rlimit_memory_data_soft": 2147483647, - "rlimit_memory_locked_hard": 65536, - "rlimit_memory_locked_soft": 65536, - "rlimit_memory_rss_hard": 2147483647, - "rlimit_memory_rss_soft": 2147483647, - "rlimit_memory_stack_hard": 2147483647, - "rlimit_memory_stack_soft": 8388608, - "rlimit_memory_vms_hard": 2147483647, - "rlimit_memory_vms_soft": 2147483647, - "rlimit_nice_priority_hard": 0, - "rlimit_nice_priority_soft": 0, - "rlimit_num_fds_hard": 4096, - "rlimit_num_fds_soft": 1024, - "rlimit_realtime_priority_hard": 0, - "rlimit_realtime_priority_soft": 0, - "rlimit_signals_pending_hard": 78994, - "rlimit_signals_pending_soft": 78994, - "signals_pending": 0, - "voluntary_context_switches": 42, - "write_bytes": 106496, - "write_count": 35, - }, - time.Unix(0, 1517620624000000000), - ), + metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), ), }, err: nil, @@ -712,13 +647,11 @@ func TestSeriesParser(t *testing.T) { name: "minimal", input: []byte("cpu"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), ), }, }, @@ -726,16 +659,14 @@ func TestSeriesParser(t *testing.T) { name: "tags", input: []byte("cpu,a=x,b=y"), metrics: 
[]telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "a": "x", - "b": "y", - }, - map[string]interface{}{}, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{ + "a": "x", + "b": "y", + }, + map[string]interface{}{}, + time.Unix(0, 0), ), }, }, diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index d39a9d6bf77d9..682a0c62b56cb 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -3,8 +3,8 @@ The JSON data format parses a [JSON][json] object or an array of objects into metric fields. -**NOTE:** All JSON numbers are converted to float fields. JSON String are -ignored unless specified in the `tag_key` or `json_string_fields` options. +**NOTE:** All JSON numbers are converted to float fields. JSON strings and booleans are +ignored unless specified in the `tag_keys` or `json_string_fields` options. ### Configuration @@ -30,13 +30,15 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. json_query = "" ## Tag keys is an array of keys that should be added as tags. Matching keys - ## are no longer saved as fields. + ## are no longer saved as fields. Supports wildcard glob matching. tag_keys = [ "my_tag_1", - "my_tag_2" + "my_tag_2", + "tags_*", + "tag*" ] - ## Array of glob pattern strings keys that should be added as string fields. + ## Array of glob patterns for keys (string or boolean values) that should be added as string fields. json_string_fields = [] ## Name key is the key to use as the measurement name. diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index bd9dee869170f..7e138e33adf5c 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -36,7 +36,7 @@ type Config struct { type Parser struct { metricName string - tagKeys []string + tagKeys filter.Filter stringFields filter.Filter nameKey string query string @@ -53,9 +53,14 @@ func New(config *Config) (*Parser, error) { return nil, err } + tagKeyFilter, err := filter.Compile(config.TagKeys) + if err != nil { + return nil, err + } + return &Parser{ metricName: config.MetricName, - tagKeys: config.TagKeys, + tagKeys: tagKeyFilter, nameKey: config.NameKey, stringFields: stringFilter, query: config.Query, @@ -83,7 +88,6 @@ func (p *Parser) parseArray(data []interface{}, timestamp time.Time) ([]telegraf results = append(results, metrics...) 
default: return nil, ErrWrongType - } } @@ -104,7 +108,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( name := p.metricName - //checks if json_name_key is set + // checks if json_name_key is set if p.nameKey != "" { switch field := f.Fields[p.nameKey].(type) { case string: @@ -112,7 +116,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( } } - //if time key is specified, set timestamp to it + // if time key is specified, set timestamp to it if p.timeKey != "" { if p.timeFormat == "" { err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") @@ -131,46 +135,48 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( delete(f.Fields, p.timeKey) - //if the year is 0, set to current year + // if the year is 0, set to current year if timestamp.Year() == 0 { timestamp = timestamp.AddDate(time.Now().Year(), 0, 0) } } tags, nFields := p.switchFieldToTag(tags, f.Fields) - metric, err := metric.New(name, tags, nFields, timestamp) - if err != nil { - return nil, err - } - return []telegraf.Metric{metric}, nil + m := metric.New(name, tags, nFields, timestamp) + + return []telegraf.Metric{m}, nil } -//will take in field map with strings and bools, -//search for TagKeys that match fieldnames and add them to tags -//will delete any strings/bools that shouldn't be fields -//assumes that any non-numeric values in TagKeys should be displayed as tags +// will take in field map with strings and bools, +// search for TagKeys that match fieldnames and add them to tags +// will delete any strings/bools that shouldn't be fields +// assumes that any non-numeric values in TagKeys should be displayed as tags func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { - for _, name := range p.tagKeys { - //switch any fields in tagkeys into tags - if fields[name] == nil { + for name, value := range fields { + if p.tagKeys == nil { + continue + } + // skip switch statement if tagkey doesn't match fieldname + if !p.tagKeys.Match(name) { continue } - switch value := fields[name].(type) { + // switch any fields in tagkeys into tags + switch t := value.(type) { case string: - tags[name] = value + tags[name] = t delete(fields, name) case bool: - tags[name] = strconv.FormatBool(value) + tags[name] = strconv.FormatBool(t) delete(fields, name) case float64: - tags[name] = strconv.FormatFloat(value, 'f', -1, 64) + tags[name] = strconv.FormatFloat(t, 'f', -1, 64) delete(fields, name) default: log.Printf("E! [parsers.json] Unrecognized type %T", value) } } - //remove any additional string/bool values from fields + // remove any additional string/bool values from fields for fk := range fields { switch fields[fk].(type) { case string, bool: @@ -187,10 +193,13 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { if p.query != "" { result := gjson.GetBytes(buf, p.query) buf = []byte(result.Raw) - if !result.IsArray() && !result.IsObject() { - err := fmt.Errorf("E! Query path must lead to a JSON object or array of objects, but lead to: %v", result.Type) + if !result.IsArray() && !result.IsObject() && result.Type != gjson.Null { + err := fmt.Errorf("E! 
Query path must lead to a JSON object, array of objects or null, but lead to: %v", result.Type) return nil, err } + if result.Type == gjson.Null { + return nil, nil + } } buf = bytes.TrimSpace(buf) @@ -211,6 +220,8 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { return p.parseObject(v, timestamp) case []interface{}: return p.parseArray(v, timestamp) + case nil: + return nil, nil default: return nil, ErrWrongType } diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 31c507e7517f7..1010d7971249d 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -24,10 +24,14 @@ const validJSONTags = ` { "a": 5, "b": { - "c": 6 + "c": 6 }, "mytag": "foobar", - "othertag": "baz" + "othertag": "baz", + "tags_object": { + "mytag": "foobar", + "othertag": "baz" + } } ` @@ -39,7 +43,16 @@ const validJSONArrayTags = ` "c": 6 }, "mytag": "foo", - "othertag": "baz" + "othertag": "baz", + "tags_array": [ + { + "mytag": "foo" + }, + { + "othertag": "baz" + } + ], + "anothert": "foo" }, { "a": 7, @@ -47,8 +60,17 @@ const validJSONArrayTags = ` "c": 8 }, "mytag": "bar", + "othertag": "baz", + "tags_array": [ + { + "mytag": "bar" + }, + { "othertag": "baz" -} + } + ], + "anothert": "bar" + } ] ` @@ -870,6 +892,18 @@ func TestParse(t *testing.T) { input: []byte(`[]`), expected: []telegraf.Metric{}, }, + { + name: "parse null", + config: &Config{}, + input: []byte(`null`), + expected: []telegraf.Metric{}, + }, + { + name: "parse null with query", + config: &Config{Query: "result.data"}, + input: []byte(`{"error":null,"result":{"data":null,"items_per_page":10,"total_items":0,"total_pages":0}}`), + expected: []telegraf.Metric{}, + }, { name: "parse simple array", config: &Config{ @@ -948,3 +982,391 @@ func TestParse(t *testing.T) { }) } } + +func TestParseWithWildcardTagKeys(t *testing.T) { + var tests = []struct { + name string + config *Config + input []byte + expected []telegraf.Metric + }{ + { + name: "wildcard matching with tags nested within object", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"tags_object_*"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "wildcard matching with keys containing tag", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"*tag"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "othertag": "baz", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "strings not matching tag keys are still also ignored", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"wrongtagkey", "tags_object_*"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "single tag key is also found and applied", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"mytag", "tags_object_*"}, + }, + 
input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.Parse(tt.input) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } +} + +func TestParseLineWithWildcardTagKeys(t *testing.T) { + var tests = []struct { + name string + config *Config + input string + expected telegraf.Metric + }{ + { + name: "wildcard matching with tags nested within object", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"tags_object_*"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + { + name: "wildcard matching with keys containing tag", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"*tag"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "othertag": "baz", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + { + name: "strings not matching tag keys are ignored", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"wrongtagkey", "tags_object_*"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + { + name: "single tag key is also found and applied", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"mytag", "tags_object_*"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } +} + +func TestParseArrayWithWildcardTagKeys(t *testing.T) { + var tests = []struct { + name string + config *Config + input []byte + expected []telegraf.Metric + }{ + { + name: "wildcard matching with keys containing tag within array works", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"*tag"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "foo", + "othertag": "baz", + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "bar", + "othertag": "baz", + 
"tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: " wildcard matching with tags nested array within object works", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"tags_array_*"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "strings not matching tag keys are still also ignored", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"mytag", "*tag"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "foo", + "othertag": "baz", + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "bar", + "othertag": "baz", + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "single tag key is also found and applied", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"anothert", "*tag"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "anothert": "foo", + "mytag": "foo", + "othertag": "baz", + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "anothert": "bar", + "mytag": "bar", + "othertag": "baz", + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.Parse(tt.input) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md new file mode 100644 index 0000000000000..a1effd5940614 --- /dev/null +++ b/plugins/parsers/json_v2/README.md @@ -0,0 +1,187 @@ +# JSON Parser - Version 2 + +This parser takes valid JSON input and turns it into metrics. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. + +## Configuration + +You configure this parser by describing the metric you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. 
The example below shows all the possible configuration keys you can define for each config table. The sections that follow define these configuration keys in more detail. + +**Example configuration:** + +```toml + [[inputs.file]] + files = [] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "" # A string that will become the new measurement name + measurement_name_path = "" # A string with valid GJSON path syntax, will override measurement_name + timestamp_path = "" # A string with valid GJSON path syntax to a valid timestamp (single value) + timestamp_format = "" # A string with a valid timestamp format (see below for possible values) + timestamp_timezone = "" # A string with a valid timezone (see below for possible values) + [[inputs.file.json_v2.tag]] + path = "" # A string with valid GJSON path syntax + rename = "new name" # A string with a new name for the tag key + [[inputs.file.json_v2.field]] + path = "" # A string with valid GJSON path syntax + rename = "new name" # A string with a new name for the field key + type = "int" # A string specifying the type (int,uint,float,string,bool) + [[inputs.file.json_v2.object]] + path = "" # A string with valid GJSON path syntax + timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp + timestamp_format = "" # A string with a valid timestamp format (see below for possible values) + timestamp_timezone = "" # A string with a valid timezone (see below for possible values) + disable_prepend_keys = false # Set to true to prevent parent keys from being prepended to nested keys + included_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be the only keys included in the result + excluded_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that shouldn't be included in the result + tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a new name for the tag key + key = "new name" + [inputs.file.json_v2.object.fields] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a type (int,uint,float,string,bool) + key = "int" +``` +--- +### root config options + +* **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string. +* **measurement_name_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a measurement name from the JSON input. The query must return a single data value or it will use the default measurement name. This takes precedence over `measurement_name`. +* **timestamp_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a timestamp from the JSON input. The query must return a single data value or it will default to the current time (a short sketch of the timestamp options appears at the end of this README). 
+* **timestamp_format (OPTIONAL, but REQUIRED when `timestamp_path` is defined)**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +the Go "reference time" which is defined to be the specific time: +`Mon Jan 2 15:04:05 MST 2006` +* **timestamp_timezone (OPTIONAL, but REQUIRES `timestamp_path`)**: This option should be set to a +[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC`. + +--- + +### `field` and `tag` config options + +`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/), which is used to define a `metric`. You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. With this you can add a field or tag to a metric from data stored anywhere in your JSON. If you define the GJSON path to return a single value, you will get a single resulting metric that contains the field/tag. If you define the GJSON path to return an array of values, each field/tag will be put into a separate metric (you use the # character to retrieve JSON arrays; find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). + +Note that objects are handled separately, so if you provide a path that returns an object it will be ignored. You will need to use the `object` config table to parse objects, because `field` and `tag` don't handle relationships between data. Each `field` and `tag` you define is handled as a separate data point. + +The notable difference between `field` and `tag` is that `tag` values will always be type string while `field` can be multiple types. You can define the type of `field` to be any [type that line protocol supports](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/#data-types-and-format), which are: +* float +* int +* uint +* string +* bool + + +#### **field** + +* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +* **rename (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. +* **type (OPTIONAL)**: You can define a string value to set the desired type (float, int, uint, string, bool). If not defined it won't enforce a type and will default to using the original type defined in the JSON (bool, float, or string). + +#### **tag** + +* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +* **rename (OPTIONAL)**: You can define a string value to set the tag name. If not defined it will use the trailing word from the provided query. + +For good examples of using `field` and `tag` you can reference the following example configs (a small sketch combining `field`, `tag`, and `type` also appears at the end of this README): + +* [fields_and_tags](testdata/fields_and_tags/telegraf.conf) +--- +### object + +With the configuration section `object`, you can gather metrics from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). 
+ +The following keys can be set for `object`: + +* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) +* **timestamp_key (OPTIONAL)**: You can define a JSON key (for a nested key, prepend the parent keys with underscores) for the value to be set as the timestamp from the JSON input. +* **timestamp_format (OPTIONAL, but REQUIRED when `timestamp_key` is defined)**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +the Go "reference time" which is defined to be the specific time: +`Mon Jan 2 15:04:05 MST 2006` +* **timestamp_timezone (OPTIONAL, but REQUIRES `timestamp_key`)**: This option should be set to a +[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC`. +* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent the parent keys from being prepended to the keys of nested data. **NOTE**: duplicate names can overwrite each other when this is enabled. +* **included_keys (OPTIONAL)**: You can define a list of keys that should be the only data included in the metric; by default everything is included. +* **excluded_keys (OPTIONAL)**: You can define JSON keys to be excluded from the metric; for a nested key, prepend the parent keys with underscores. +* **tags (OPTIONAL)**: You can define JSON keys to be set as tags instead of fields; if you define a key that is an array or object, all nested values will become tags. +* **renames (OPTIONAL)**: A table matching a JSON key with the desired name (as opposed to defaulting to the key name); for nested results, use names that include the prepended parent keys. +* **fields (OPTIONAL)**: A table matching a JSON key with the desired type (int, string, bool, float); if you define a key that is an array or object, all nested values will become that type. + +## Arrays and Objects + +The following describes the high-level approach when parsing arrays and objects: + +**Array**: Every element in an array is treated as a *separate* metric + +**Object**: Every key/value in an object is treated as a *single* metric + +When handling nested arrays and objects, the above rules continue to apply as the parser creates metrics. When an object has multiple arrays as values, the arrays will become separate metrics containing only the non-array values from the object. Below you can see an example of this behavior, with an input JSON containing an array of book objects that has a nested array of characters. 
+ +Example JSON: + +```json +{ + "book": { + "title": "The Lord Of The Rings", + "chapters": [ + "A Long-expected Party", + "The Shadow of the Past" + ], + "author": "Tolkien", + "characters": [ + { + "name": "Bilbo", + "species": "hobbit" + }, + { + "name": "Frodo", + "species": "hobbit" + } + ], + "random": [ + 1, + 2 + ] + } +} + +``` + +Example configuration: + +```toml +[[inputs.file]] + files = ["./testdata/multiple_arrays_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "book" + tags = ["title"] + disable_prepend_keys = true +``` + +Expected metrics: + +``` +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Frodo",species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2 + +``` + +You can find more complicated examples under the folder `testdata`. + +## Types + +For each field you have the option to define the type. The following rules are in place for this configuration: + +* If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible. If the type can't be converted, the parser will fail. +* If a type isn't defined, the parser will use the default type defined in the JSON (bool, float, or string). + +The type values you can set: + +* `int`: bool, floats or strings (with valid numbers) can be converted to an int. +* `uint`: bool, floats or strings (with valid numbers) can be converted to a uint. +* `string`: any data can be formatted as a string. +* `float`: string values (with valid numbers) or integers can be converted to a float. +* `bool`: the string values "true" or "false" (regardless of capitalization) or the integer values `0` or `1` can be converted to a bool. 
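+
+As a closing illustration, here is a minimal sketch that combines the `field`, `tag`, and `type` options described above. The input file path and its keys (`sensor`, `reading`, `ok`) are hypothetical stand-ins, not part of the shipped test data:
+
+```toml
+# Hypothetical input (./testdata/example/input.json):
+#   {"sensor": "room1", "reading": "42", "ok": true}
+[[inputs.file]]
+  files = ["./testdata/example/input.json"]
+  data_format = "json_v2"
+  [[inputs.file.json_v2]]
+    measurement_name = "sensors"
+    [[inputs.file.json_v2.tag]]
+      path = "sensor"    # tag values are always strings
+    [[inputs.file.json_v2.field]]
+      path = "reading"
+      type = "int"       # the string "42" is converted to 42i per the rules above
+    [[inputs.file.json_v2.field]]
+      path = "ok"
+      type = "string"    # the boolean true is formatted as the string "true"
+```
+
+Under these assumptions the expected line protocol would be `sensors,sensor=room1 ok="true",reading=42i`.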
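+
+And a minimal sketch of the root `timestamp_path`/`timestamp_format` options, again against a hypothetical input:
+
+```toml
+# Hypothetical input: {"epoch": 1555745371, "value": 1}
+[[inputs.file]]
+  files = ["./testdata/example/input.json"]
+  data_format = "json_v2"
+  [[inputs.file.json_v2]]
+    timestamp_path = "epoch"     # must resolve to a single value
+    timestamp_format = "unix"    # required whenever timestamp_path is set
+    [[inputs.file.json_v2.field]]
+      path = "value"
+      type = "int"
+```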
diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go new file mode 100644 index 0000000000000..c3fd3a5fbf59b --- /dev/null +++ b/plugins/parsers/json_v2/parser.go @@ -0,0 +1,563 @@ +package json_v2 + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/tidwall/gjson" +) + +type Parser struct { + Configs []Config + DefaultTags map[string]string + Log telegraf.Logger + Timestamp time.Time + + measurementName string + + iterateObjects bool + currentSettings JSONObject +} + +type Config struct { + MeasurementName string `toml:"measurement_name"` // OPTIONAL + MeasurementNamePath string `toml:"measurement_name_path"` // OPTIONAL + TimestampPath string `toml:"timestamp_path"` // OPTIONAL + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + + Fields []DataSet + Tags []DataSet + JSONObjects []JSONObject +} + +type DataSet struct { + Path string `toml:"path"` // REQUIRED + Type string `toml:"type"` // OPTIONAL, can't be set for tags they will always be a string + Rename string `toml:"rename"` // OPTIONAL +} + +type JSONObject struct { + Path string `toml:"path"` // REQUIRED + TimestampKey string `toml:"timestamp_key"` // OPTIONAL + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + Renames map[string]string `toml:"renames"` // OPTIONAL + Fields map[string]string `toml:"fields"` // OPTIONAL + Tags []string `toml:"tags"` // OPTIONAL + IncludedKeys []string `toml:"included_keys"` // OPTIONAL + ExcludedKeys []string `toml:"excluded_keys"` // OPTIONAL + DisablePrependKeys bool `toml:"disable_prepend_keys"` // OPTIONAL +} + +type MetricNode struct { + OutputName string + SetName string + Tag bool + DesiredType string // Can be "int", "uint", "float", "bool", "string" + + Metric telegraf.Metric + gjson.Result +} + +func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { + // Only valid JSON is supported + if !gjson.Valid(string(input)) { + return nil, fmt.Errorf("Invalid JSON provided, unable to parse") + } + + var metrics []telegraf.Metric + + for _, c := range p.Configs { + // Measurement name configuration + p.measurementName = c.MeasurementName + if c.MeasurementNamePath != "" { + result := gjson.GetBytes(input, c.MeasurementNamePath) + if !result.IsArray() && !result.IsObject() { + p.measurementName = result.String() + } + } + + // Timestamp configuration + p.Timestamp = time.Now() + if c.TimestampPath != "" { + result := gjson.GetBytes(input, c.TimestampPath) + if !result.IsArray() && !result.IsObject() { + if c.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + + var err error + p.Timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) + if err != nil { + return nil, err + } + } + } + + fields, err := p.processMetric(c.Fields, input, false) + if err != nil { + return nil, err + } + + tags, err := p.processMetric(c.Tags, input, true) + if err != nil { + return nil, err + } + + objects, err := p.processObjects(c.JSONObjects, input) + if err != nil { + return nil, err + } + + metrics = append(metrics, 
cartesianProduct(tags, fields)...) + + if len(objects) != 0 && len(metrics) != 0 { + metrics = cartesianProduct(objects, metrics) + } else { + metrics = append(metrics, objects...) + } + } + + for k, v := range p.DefaultTags { + for _, t := range metrics { + t.AddTag(k, v) + } + } + + return metrics, nil +} + +// processMetric will iterate over all 'field' or 'tag' configs and create metrics for each +// A field/tag can either be a single value or an array of values, each resulting in its own metric +// For multiple configs, a set of metrics is created from the cartesian product of each separate config +func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegraf.Metric, error) { + if len(data) == 0 { + return nil, nil + } + + p.iterateObjects = false + var metrics [][]telegraf.Metric + + for _, c := range data { + if c.Path == "" { + return nil, fmt.Errorf("GJSON path is required") + } + result := gjson.GetBytes(input, c.Path) + + if result.IsObject() { + p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) + continue + } + + setName := c.Rename + // Default to the last path word, should be the upper key name + if setName == "" { + s := strings.Split(c.Path, ".") + setName = s[len(s)-1] + } + setName = strings.ReplaceAll(setName, " ", "_") + + mNode := MetricNode{ + OutputName: setName, + SetName: setName, + DesiredType: c.Type, + Tag: tag, + Metric: metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + p.Timestamp, + ), + Result: result, + } + + // Expand all array's and nested arrays into separate metrics + nodes, err := p.expandArray(mNode) + if err != nil { + return nil, err + } + + metrics = append(metrics, nodes) + } + + for i := 1; i < len(metrics); i++ { + metrics[i] = cartesianProduct(metrics[i-1], metrics[i]) + } + + return metrics[len(metrics)-1], nil +} + +func cartesianProduct(a, b []telegraf.Metric) []telegraf.Metric { + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + p := make([]telegraf.Metric, len(a)*len(b)) + i := 0 + for _, a := range a { + for _, b := range b { + m := a.Copy() + mergeMetric(b, m) + p[i] = m + i++ + } + } + + return p +} + +func mergeMetric(a telegraf.Metric, m telegraf.Metric) { + for _, f := range a.FieldList() { + m.AddField(f.Key, f.Value) + } + for _, t := range a.TagList() { + m.AddTag(t.Key, t.Value) + } +} + +// expandArray will recursively create a new MetricNode for each element in a JSON array or single value +func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { + var results []telegraf.Metric + + if result.IsObject() { + if !p.iterateObjects { + p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + return results, nil + } + r, err := p.combineObject(result) + if err != nil { + return nil, err + } + results = append(results, r...) + return results, nil + } + + if result.IsArray() { + var err error + result.ForEach(func(_, val gjson.Result) bool { + m := metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + p.Timestamp, + ) + + if val.IsObject() { + if p.iterateObjects { + n := MetricNode{ + SetName: result.SetName, + Metric: m, + Result: val, + } + r, err := p.combineObject(n) + if err != nil { + return false + } + + results = append(results, r...) 
+ } else { + p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + } + if len(results) != 0 { + for _, newResult := range results { + mergeMetric(result.Metric, newResult) + } + } + return true + } + + for _, f := range result.Metric.FieldList() { + m.AddField(f.Key, f.Value) + } + for _, f := range result.Metric.TagList() { + m.AddTag(f.Key, f.Value) + } + n := MetricNode{ + Tag: result.Tag, + DesiredType: result.DesiredType, + OutputName: result.OutputName, + SetName: result.SetName, + Metric: m, + Result: val, + } + r, err := p.expandArray(n) + if err != nil { + return false + } + results = append(results, r...) + return true + }) + if err != nil { + return nil, err + } + } else { + if result.SetName == p.currentSettings.TimestampKey { + if p.currentSettings.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + timestamp, err := internal.ParseTimestamp(p.currentSettings.TimestampFormat, result.Value(), p.currentSettings.TimestampTimezone) + if err != nil { + return nil, err + } + result.Metric.SetTime(timestamp) + } else { + switch result.Value().(type) { + case nil: // Ignore JSON values that are set as null + default: + if result.Tag { + result.DesiredType = "string" + } + v, err := p.convertType(result.Result, result.DesiredType, result.SetName) + if err != nil { + return nil, err + } + if result.Tag { + result.Metric.AddTag(result.OutputName, v.(string)) + } else { + result.Metric.AddField(result.OutputName, v) + } + } + } + + results = append(results, result.Metric) + } + + return results, nil +} + +// processObjects will iterate over all 'object' configs and create metrics for each +func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf.Metric, error) { + p.iterateObjects = true + var t []telegraf.Metric + for _, c := range objects { + p.currentSettings = c + if c.Path == "" { + return nil, fmt.Errorf("GJSON path is required") + } + result := gjson.GetBytes(input, c.Path) + + if result.Type == gjson.Null { + return nil, fmt.Errorf("GJSON Path returned null") + } + + rootObject := MetricNode{ + Metric: metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + p.Timestamp, + ), + Result: result, + } + metrics, err := p.expandArray(rootObject) + if err != nil { + return nil, err + } + t = append(t, metrics...) 
+ } + + return t, nil +} + +// combineObject will add all fields/tags to a single metric +// If the object has multiple arrays as elements it won't combine those; they will remain separate metrics +func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { + var results []telegraf.Metric + if result.IsArray() || result.IsObject() { + var err error + result.ForEach(func(key, val gjson.Result) bool { + // Determine if field/tag set name is configured + var setName string + if result.SetName != "" { + setName = result.SetName + "_" + strings.ReplaceAll(key.String(), " ", "_") + } else { + setName = strings.ReplaceAll(key.String(), " ", "_") + } + + if p.isExcluded(setName) || !p.isIncluded(setName, val) { + return true + } + + var outputName string + if p.currentSettings.DisablePrependKeys { + outputName = strings.ReplaceAll(key.String(), " ", "_") + } else { + outputName = setName + } + for k, n := range p.currentSettings.Renames { + if k == setName { + outputName = n + break + } + } + + arrayNode := MetricNode{ + DesiredType: result.DesiredType, + Tag: result.Tag, + OutputName: outputName, + SetName: setName, + Metric: result.Metric, + Result: val, + } + + for k, t := range p.currentSettings.Fields { + if setName == k { + arrayNode.DesiredType = t + break + } + } + + tag := false + for _, t := range p.currentSettings.Tags { + if setName == t { + tag = true + break + } + } + + arrayNode.Tag = tag + + if val.IsObject() { + results, err = p.combineObject(arrayNode) + if err != nil { + return false + } + } else { + r, err := p.expandArray(arrayNode) + if err != nil { + return false + } + results = cartesianProduct(results, r) + } + + return true + }) + + if err != nil { + return nil, err + } + } + return results, nil +} + +func (p *Parser) isIncluded(key string, val gjson.Result) bool { + if len(p.currentSettings.IncludedKeys) == 0 { + return true + } + // automatically adds tags to included_keys so they do NOT have to be repeated in the config + p.currentSettings.IncludedKeys = append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...)
+ for _, i := range p.currentSettings.IncludedKeys { + if i == key { + return true + } + if val.IsArray() || val.IsObject() { + // Check if the included key is a sub element + if strings.HasPrefix(i, key) { + return true + } + } + } + return false +} + +func (p *Parser) isExcluded(key string) bool { + for _, i := range p.currentSettings.ExcludedKeys { + if i == key { + return true + } + } + return false +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + return nil, fmt.Errorf("ParseLine is designed for parsing influx line protocol, therefore not implemented for parsing JSON") +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +// convertType will convert the value parsed from the input JSON to the specified type in the config +func (p *Parser) convertType(input gjson.Result, desiredType string, name string) (interface{}, error) { + switch inputType := input.Value().(type) { + case string: + if desiredType != "string" { + switch desiredType { + case "uint": + r, err := strconv.ParseUint(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) + } + return r, nil + case "int": + r, err := strconv.ParseInt(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) + } + return r, nil + case "float": + r, err := strconv.ParseFloat(inputType, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + } + return r, nil + case "bool": + r, err := strconv.ParseBool(inputType) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + } + return r, nil + } + } + case bool: + switch desiredType { + case "string": + return strconv.FormatBool(inputType), nil + case "int": + if inputType { + return int64(1), nil + } + + return int64(0), nil + case "uint": + if inputType { + return uint64(1), nil + } + + return uint64(0), nil + } + case float64: + if desiredType != "float" { + switch desiredType { + case "string": + return fmt.Sprint(inputType), nil + case "int": + return input.Int(), nil + case "uint": + return input.Uint(), nil + case "bool": + if inputType == 0 { + return false, nil + } else if inputType == 1 { + return true, nil + } else { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) + } + } + } + default: + return nil, fmt.Errorf("unknown format '%T' for field '%s'", inputType, name) + } + + return input.Value(), nil +} diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go new file mode 100644 index 0000000000000..3de93dc22b49f --- /dev/null +++ b/plugins/parsers/json_v2/parser_test.go @@ -0,0 +1,81 @@ +package json_v2_test + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/file" + "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestMultipleConfigs(t *testing.T) { + // Get all directories in testdata + folders, err := ioutil.ReadDir("testdata") + require.NoError(t, err) + // Make sure testdata contains data + require.Greater(t, len(folders), 0) + + for _, f := range folders { + t.Run(f.Name(), func(t *testing.T) { + // Process the telegraf config file for the 
test + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", f.Name())) + require.NoError(t, err) + inputs.Add("file", func() telegraf.Input { + return &file.File{} + }) + cfg := config.NewConfig() + err = cfg.LoadConfigData(buf) + require.NoError(t, err) + + // Gather the metrics from the configured input file + acc := testutil.Accumulator{} + for _, i := range cfg.Inputs { + err = i.Init() + require.NoError(t, err) + err = i.Gather(&acc) + require.NoError(t, err) + } + + // Process expected metrics and compare with resulting metrics + expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expectedOutputs, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} + +func readMetricFile(path string) ([]telegraf.Metric, error) { + var metrics []telegraf.Metric + expectedFile, err := os.Open(path) + if err != nil { + return metrics, err + } + defer expectedFile.Close() + + parser := influx.NewParser(influx.NewMetricHandler()) + scanner := bufio.NewScanner(expectedFile) + for scanner.Scan() { + line := scanner.Text() + if line != "" { + m, err := parser.ParseLine(line) + if err != nil { + return nil, fmt.Errorf("unable to parse metric in %q: %v", line, err) + } + metrics = append(metrics, m) + } + } + err = expectedFile.Close() + if err != nil { + return metrics, err + } + + return metrics, nil +} diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/expected.out b/plugins/parsers/json_v2/testdata/array_of_objects/expected.out new file mode 100644 index 0000000000000..75f9e5e407f21 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/expected.out @@ -0,0 +1,2 @@ +file properties_mag=5.17 +file properties_mag=6.2 diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/input.json b/plugins/parsers/json_v2/testdata/array_of_objects/input.json new file mode 100644 index 0000000000000..6b43061bcba43 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/input.json @@ -0,0 +1,14 @@ +{ + "features": [ + { + "properties": { + "mag": 5.17 + } + }, + { + "properties": { + "mag": 6.2 + } + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf b/plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf new file mode 100644 index 0000000000000..9a93a1d05a3be --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf @@ -0,0 +1,9 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/array_of_objects/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/expected.out b/plugins/parsers/json_v2/testdata/complex_nesting/expected.out new file mode 100644 index 0000000000000..265549c57abce --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/expected.out @@ -0,0 +1,3 @@ +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=-119.4998333,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=38.5075,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" +file,properties_place=Antelope\ Valley\,\ CA 
geometry_coordinates=7.45,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/input.json b/plugins/parsers/json_v2/testdata/complex_nesting/input.json new file mode 100644 index 0000000000000..69bff40a45983 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/input.json @@ -0,0 +1,31 @@ +{ + "type": "FeatureCollection", + "metadata": { + "generated": 1626285886000, + "url": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/significant_week.geojson", + "title": "USGS Significant Earthquakes, Past Week", + "status": 200, + "api": "1.10.3", + "count": 1 + }, + "features": [ + { + "type": "Feature", + "properties": { + "mag": 6, + "place": "Antelope Valley, CA", + "time": 1625784588110, + "updated": 1626277167263 + }, + "geometry": { + "type": "Point", + "coordinates": [ + -119.4998333, + 38.5075, + 7.45 + ] + }, + "id": "nc73584926" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf b/plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf new file mode 100644 index 0000000000000..66347da8410b9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf @@ -0,0 +1,9 @@ +[[inputs.file]] + files = ["./testdata/complex_nesting/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + timestamp_key = "properties_time" + timestamp_format = "unix_ms" + tags = ["properties_place"] diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out b/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out new file mode 100644 index 0000000000000..2b7f6c16834c4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out @@ -0,0 +1,2 @@ +file,status=200 duration=2i,json_duration=100 +file,status=200 duration=2i,json_duration=60 diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/input.json b/plugins/parsers/json_v2/testdata/fields_and_tags/input.json new file mode 100644 index 0000000000000..e8be29f955ca0 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/input.json @@ -0,0 +1,46 @@ +{ + "message": "abc", + "fields": { + "status": 200, + "key": 1, + "json": [ + { + "duration": 100, + "code": 1, + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7, + "nest": { + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 4, + "fields": 7 + } + }, + { + "duration": 60, + "code": 1, + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7, + "nest": { + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7 + } + } + ], + "duration": 2 + } +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf new file mode 100644 index 0000000000000..ceec731f991bd --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf @@ -0,0 +1,14 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/1363 + +[[inputs.file]] + files = ["./testdata/fields_and_tags/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.tag]] + path = "fields.status" + [[inputs.file.json_v2.field]] + path = "fields.json.#.duration" + rename = "json_duration" + [[inputs.file.json_v2.field]] + path = "fields.duration" + type = "int" diff --git 
a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out new file mode 100644 index 0000000000000..02edaba46ff2f --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out @@ -0,0 +1,5 @@ +bart_json_v2,name=Powell\ St. minutes=9i +bart_json_v2,name=Powell\ St. minutes=40i +bart_json_v2,name=Powell\ St. minutes=70i +bart_json_v2,name=Powell\ St. minutes=12i +bart_json_v2,name=Powell\ St. minutes=42i diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json new file mode 100644 index 0000000000000..15a0dab9519cb --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&json=y" + }, + "date": "06/03/2021", + "time": "09:46:01 AM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "9", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "40", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "70", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Berryessa", + "abbreviation": "BERY", + "limited": "0", + "estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "42", + "platform": "2", + "direction": "North", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf new file mode 100644 index 0000000000000..e2b655930acce --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf @@ -0,0 +1,10 @@ +[[inputs.file]] + files = ["./testdata/fields_and_tags_complex/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "bart_json_v2" + [[inputs.file.json_v2.tag]] + path = "root.station.#.name" + [[inputs.file.json_v2.field]] + path = "root.station.#.etd.#.estimate.#.minutes" + type = "int" diff --git a/plugins/parsers/json_v2/testdata/large_numbers/expected.out b/plugins/parsers/json_v2/testdata/large_numbers/expected.out new file mode 100644 index 0000000000000..1edb0565f6313 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/expected.out @@ -0,0 +1,3 @@ +file large=4294967296i,larger=9007199254740991i,largest=9223372036854775807i +file large=9007199254740991u,larger=9223372036854775807u,largest=18446744073709551615u +file large=4294967296,larger=4.294967296663e+09,largest=9007199254740991 diff --git a/plugins/parsers/json_v2/testdata/large_numbers/input.json b/plugins/parsers/json_v2/testdata/large_numbers/input.json new file mode 100644 index 
0000000000000..a800d0cd0d4e5 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/input.json @@ -0,0 +1,17 @@ +{ + "int": { + "large": 4294967296, + "larger": 9007199254740991, + "largest": 9223372036854775807 + }, + "uint": { + "large": 9007199254740991, + "larger": 9223372036854775807, + "largest": 18446744073709551615 + }, + "float": { + "large": 4294967296, + "larger": 4.294967296663e+09, + "largest": 9007199254740991 + } +} diff --git a/plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf b/plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf new file mode 100644 index 0000000000000..a0b9736a045a6 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf @@ -0,0 +1,22 @@ +[[inputs.file]] + files = ["./testdata/large_numbers/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "int" + [inputs.file.json_v2.object.fields] + large = "int" + larger = "int" + largest = "int" + [[inputs.file.json_v2.object]] + path = "uint" + [inputs.file.json_v2.object.fields] + large = "uint" + larger = "uint" + largest = "uint" + [[inputs.file.json_v2.object]] + path = "float" + [inputs.file.json_v2.object.fields] + large = "float" + larger = "float" + largest = "float" diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out b/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out new file mode 100644 index 0000000000000..4afd678a4b71a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out @@ -0,0 +1 @@ +32 label="Basic" diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/input.json b/plugins/parsers/json_v2/testdata/measurement_name_int/input.json new file mode 100644 index 0000000000000..34dccc621ed15 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/input.json @@ -0,0 +1,19 @@ +{ + "value_id": "52-32-1-0", + "node_id": 52, + "class_id": 32, + "type": "byte", + "genre": "basic", + "instance": 1, + "index": 0, + "label": "Basic", + "units": "", + "help": "Basic status of the node", + "read_only": false, + "write_only": false, + "min": 0, + "max": 255, + "is_polled": false, + "value": 0, + "lastUpdate": 1584636017962 +} diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf b/plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf new file mode 100644 index 0000000000000..db6a86ca197f2 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf @@ -0,0 +1,9 @@ +# Example taken from: https://github.com/influxdata/feature-requests/issues/160 + +[[inputs.file]] + files = ["./testdata/measurement_name_int/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name_path = "class_id" + [[inputs.file.json_v2.field]] + path = "label" diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out new file mode 100644 index 0000000000000..e7f0e222418aa --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out @@ -0,0 +1 @@ +openweather,id=2.643743e+06,name=London coord_lat=51.5085,coord_lon=-0.1257,description="few clouds",main_temp=12.54,summary="Clouds",wind_speed=2.11 1628186541000000000 diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json new file mode 100644 index 0000000000000..402113af8ca9e --- /dev/null 
+++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json @@ -0,0 +1,44 @@ +{ + "coord": { + "lon": -0.1257, + "lat": 51.5085 + }, + "weather": [ + { + "id": 801, + "main": "Clouds", + "description": "few clouds", + "icon": "02n" + } + ], + "base": "stations", + "main": { + "temp": 12.54, + "feels_like": 11.86, + "temp_min": 10.49, + "temp_max": 14.27, + "pressure": 1024, + "humidity": 77 + }, + "visibility": 10000, + "wind": { + "speed": 2.11, + "deg": 254, + "gust": 4.63 + }, + "clouds": { + "all": 21 + }, + "dt": 1633545358, + "sys": { + "type": 2, + "id": 2019646, + "country": "GB", + "sunrise": 1633500560, + "sunset": 1633541256 + }, + "timezone": 3600, + "id": 2643743, + "name": "London", + "cod": 200 +} diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf new file mode 100644 index 0000000000000..cc181960cbf1e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf @@ -0,0 +1,15 @@ +[[inputs.file]] + files = ["./testdata/mix_field_and_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "openweather" + [[inputs.file.json_v2.field]] + path = "weather.#.main" + rename = "summary" + [[inputs.file.json_v2.field]] + path = "weather.#.description" + [[inputs.file.json_v2.object]] + path = "@this" + included_keys = ["coord_lat", "coord_lon", "main_temp", "wind_speed"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result + tags = ["id", "name"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out new file mode 100644 index 0000000000000..2948da1720f64 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -0,0 +1,9 @@ +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Bilbo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Bilbo",species="hobbit",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Frodo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Frodo",species="hobbit",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=2 + diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json new file mode 100644 index 0000000000000..271638a4f6a33 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json @@ -0,0 +1,24 @@ +{ + "book": { + "title": "The Lord Of The Rings", + "chapters": [ + "A Long-expected Party", + "The Shadow of the Past" + ], + "author": "Tolkien", + 
"characters": [ + { + "name": "Bilbo", + "species": "hobbit" + }, + { + "name": "Frodo", + "species": "hobbit" + } + ], + "random": [ + 1, + 2 + ] + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf new file mode 100644 index 0000000000000..b83e383adbcc5 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf @@ -0,0 +1,11 @@ +# Example getting nested fields with duplicate names +# Example taken from: https://github.com/influxdata/telegraf/issues/1363 + +[[inputs.file]] + files = ["./testdata/multiple_arrays_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "book" + tags = ["title"] + disable_prepend_keys = true diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out b/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out new file mode 100644 index 0000000000000..0cc5bb93aafcf --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out @@ -0,0 +1,2 @@ +file name="fire" 1555745371450794118 +file name="flood" 1555745371450794118 diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json b/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json new file mode 100644 index 0000000000000..7931dca6635ab --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json @@ -0,0 +1,12 @@ +{ + "events": [ + { + "name": "fire", + "time": "1555745371410" + }, + { + "name": "flood", + "time": "1555745371410" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf new file mode 100644 index 0000000000000..da3bae2d373b7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf @@ -0,0 +1,10 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/multiple_timestamps/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "events" + timestamp_key = "time" + timestamp_format = "unix_ms" diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out new file mode 100644 index 0000000000000..d48b7660e28c3 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out @@ -0,0 +1,12 @@ +file,hostname=testhost1,outputname=1A-CC01-PC01 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=2A-CC01-KA01 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=3A-CC01-CC02 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=4A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=5A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=6A-CC01-88-INV01-A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost2,outputname=1A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=2A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=3A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=4A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=5A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=6A 
systemVoltage=27.5,systemCurrent=9.5 diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json new file mode 100644 index 0000000000000..60d7f24821297 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json @@ -0,0 +1,174 @@ +[ + { + "hostname": "testhost1", + "systemVoltage": -54.1, + "systemCurrent": -3.8, + "tables": [ + { + "outputnumber": 0.0, + "outputname": "1A-CC01-PC01", + "outputcurrent": -2.7, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 1.0, + "outputname": "2A-CC01-KA01", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 2.0, + "outputname": "3A-CC01-CC02", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 3.0, + "outputname": "4A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 4.0, + "outputname": "5A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 5.0, + "outputname": "6A-CC01-88-INV01-A", + "outputcurrent": -1.1, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "busnumber": 0.0, + "busname": "A--48A", + "busvoltage": -54.1, + "buscurrent": -3.8 + }, + { + "busnumber": 1.0, + "busname": "B--48B", + "busvoltage": -53.9, + "buscurrent": -4.2 + }, + { + "alarmnumber": 0.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 1.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 2.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 3.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 4.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + } + ] + }, + { + "hostname": "testhost2", + "systemVoltage": 27.5, + "systemCurrent": 9.5, + "tables": [ + { + "outputnumber": 0.0, + "outputname": "1A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 1.0, + "outputname": "2A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 2.0, + "outputname": "3A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 3.0, + "outputname": "4A", + "outputcurrent": 0.6, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 4.0, + "outputname": "5A", + "outputcurrent": 6.5, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 5.0, + "outputname": "6A", + "outputcurrent": 0.0, + "outputfusestatus": 2.0, + "outputenable": 1.0 + }, + { + "busnumber": 0.0, + "busname": "A-24V", + "busvoltage": 27.6, + "buscurrent": 0.6 + }, + { + "busnumber": 1.0, + "busname": "B-12V", + "busvoltage": 13.8, + "buscurrent": 0.0 + }, + { + "alarmnumber": 0.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 1.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 2.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 3.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 4.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + } + ] + } +] diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf new file mode 100644 index 0000000000000..e1748b463690b --- /dev/null +++ 
b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf
@@ -0,0 +1,16 @@
+# Example taken from: https://github.com/influxdata/telegraf/issues/6437
+
+# Parse nested and non-nested tags from JSON
+[[inputs.file]]
+    files = ["./testdata/nested_and_nonnested_tags/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.object]]
+            disable_prepend_keys = true
+            path = "@this"
+            included_keys = [
+                "systemVoltage",
+                "systemCurrent",
+                "tables",
+            ]
+            tags = ["hostname", "tables_outputname"]
diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out b/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out
new file mode 100644
index 0000000000000..972ea5eadd30b
--- /dev/null
+++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out
@@ -0,0 +1,2 @@
+new_metric,name=partition LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1610056029037925000
+new_metric,name=partition LogEndOffset=33914i,LogStartOffset=33238i,NumLogSegments=1i,Size=2i,UnderReplicatedPartitions=5i 1610056029037956000
diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json b/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json
new file mode 100644
index 0000000000000..86ded773af73b
--- /dev/null
+++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json
@@ -0,0 +1,36 @@
+[
+    {
+        "data": {
+            "LogEndOffset": 339238,
+            "LogStartOffset": 339238,
+            "NumLogSegments": 1,
+            "Size": 0,
+            "UnderReplicatedPartitions": 0
+        },
+        "name": "partition",
+        "tags": {
+            "host": "CUD1-001559",
+            "jolokia_agent_url": "http://localhost:7777/jolokia",
+            "partition": "1",
+            "topic": "qa-kafka-connect-logs"
+        },
+        "timestamp": 1591124461
+    },
+    {
+        "data": {
+            "LogEndOffset": 33914,
+            "LogStartOffset": 33238,
+            "NumLogSegments": 1,
+            "Size": 2,
+            "UnderReplicatedPartitions": 5
+        },
+        "name": "partition",
+        "tags": {
+            "host": "CUD1-001559",
+            "jolokia_agent_url": "http://localhost:7777/jolokia",
+            "partition": "1",
+            "topic": "qa-kafka-connect-logs"
+        },
+        "timestamp": 1591124461
+    }
+]
diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf
new file mode 100644
index 0000000000000..0dd7960d4ec36
--- /dev/null
+++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf
@@ -0,0 +1,15 @@
+# Example taken from: https://github.com/influxdata/feature-requests/issues/160
+
+[[inputs.file]]
+    files = ["./testdata/nested_array_of_objects/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        measurement_name = "new_metric"
+        [[inputs.file.json_v2.object]]
+            path = "@this"
+            disable_prepend_keys = true
+            excluded_keys = ["tags", "timestamp"]
+            tags = ["name"]
+            [inputs.file.json_v2.object.fields]
+                data = "int"
+
diff --git a/plugins/parsers/json_v2/testdata/nested_tags/expected.out b/plugins/parsers/json_v2/testdata/nested_tags/expected.out
new file mode 100644
index 0000000000000..7b31560a594bb
--- /dev/null
+++ b/plugins/parsers/json_v2/testdata/nested_tags/expected.out
@@ -0,0 +1,2 @@
+file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGTUT Count=0,Errors=0
+file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHLPW9T Errors=0,Count=0
diff --git a/plugins/parsers/json_v2/testdata/nested_tags/input.json b/plugins/parsers/json_v2/testdata/nested_tags/input.json
new file mode 100644
index 0000000000000..c3226f34d8e14
---
/dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/input.json @@ -0,0 +1,16 @@ +{ + "device0": { + "Count": 0, + "Errors": 0, + "Serial": "9JHNGTUT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + }, + "device1": { + "Count": 0, + "Errors": 0, + "Serial": "9JHLPW9T", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } +} diff --git a/plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf new file mode 100644 index 0000000000000..e03bd032552ba --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf @@ -0,0 +1,12 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/6853 + +[[inputs.file]] + files = ["./testdata/nested_tags/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "device0" + tags = ["Firmware", "Model", "Serial"] + [[inputs.file.json_v2.object]] + path = "device1" + tags = ["Firmware", "Model", "Serial"] diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out b/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out new file mode 100644 index 0000000000000..92757bada156d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out @@ -0,0 +1,3 @@ +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGTUT Count=0,Errors=0 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGHJBT Errors=0,Count=2 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHLPW9T Errors=0,Count=0 diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json b/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json new file mode 100644 index 0000000000000..b373d90a387b8 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json @@ -0,0 +1,35 @@ +{ + "Group A": [ + { + "Sub-group 1": [ + { + "Count": 0, + "Errors": 0, + "Serial": "9JHNGTUT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + }, + { + "Count": 2, + "Errors": 0, + "Serial": "9JHNGHJBT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } + ] + } + ], + "Group B": [ + { + "Sub-group 1": [ + { + "Count": 0, + "Errors": 0, + "Serial": "9JHLPW9T", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } + ] + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf new file mode 100644 index 0000000000000..61fba304a4f27 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf @@ -0,0 +1,14 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/6853 + +[[inputs.file]] + files = ["./testdata/nested_tags_complex/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "Group A" + disable_prepend_keys = true + tags = ["Sub-group_1_Firmware", "Sub-group_1_Model", "Sub-group_1_Serial"] + [[inputs.file.json_v2.object]] + path = "Group B" + disable_prepend_keys = true + tags = ["Sub-group_1_Firmware", "Sub-group_1_Model", "Sub-group_1_Serial"] diff --git a/plugins/parsers/json_v2/testdata/null/expected.out b/plugins/parsers/json_v2/testdata/null/expected.out new file mode 100644 index 0000000000000..4f99713cb069f --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/expected.out @@ -0,0 +1 @@ +file,id=ak0217l8ue0x,type=Feature 
detail="https://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/ak0217l8ue0x.geojson",mag=1.5,place="63 km N of Petersville, Alaska",status="automatic",time=1623708726566,updated=1623709998223,url="https://earthquake.usgs.gov/earthquakes/eventpage/ak0217l8ue0x" diff --git a/plugins/parsers/json_v2/testdata/null/input.json b/plugins/parsers/json_v2/testdata/null/input.json new file mode 100644 index 0000000000000..757f5483c7ebe --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/input.json @@ -0,0 +1,40 @@ +{ + "type": "FeatureCollection", + "metadata": { + "generated": 1623710450000, + "url": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson", + "title": "USGS All Earthquakes, Past Hour", + "status": 200, + "api": "1.10.3", + "count": 10 + }, + "features": [ + { + "type": "Feature", + "properties": { + "mag": 1.5, + "place": "63 km N of Petersville, Alaska", + "time": 1623708726566, + "updated": 1623709998223, + "tz": null, + "url": "https://earthquake.usgs.gov/earthquakes/eventpage/ak0217l8ue0x", + "detail": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/ak0217l8ue0x.geojson", + "felt": null, + "cdi": null, + "mmi": null, + "alert": null, + "status": "automatic" + }, + "id": "ak0217l8ue0x" + } + ], + "bbox": [ + -157.5749, + 32.9001667, + 0.25, + -115.6211667, + 66.331, + 132.5 + ] + } + \ No newline at end of file diff --git a/plugins/parsers/json_v2/testdata/null/telegraf.conf b/plugins/parsers/json_v2/testdata/null/telegraf.conf new file mode 100644 index 0000000000000..a9e55ad1edc41 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/telegraf.conf @@ -0,0 +1,8 @@ +[[inputs.file]] + files = ["./testdata/null/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + tags = ["type", "id"] + disable_prepend_keys = true diff --git a/plugins/parsers/json_v2/testdata/object/expected.out b/plugins/parsers/json_v2/testdata/object/expected.out new file mode 100644 index 0000000000000..8832d32bf0b6c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/expected.out @@ -0,0 +1,5 @@ +bart_json_v2,destination=Antioch,name=Colma minutes=13i +bart_json_v2,destination=Antioch,name=Colma minutes=43i +bart_json_v2,destination=Millbrae,name=Colma minutes=19i +bart_json_v2,destination=Millbrae,name=Colma minutes=49i +bart_json_v2,destination=Millbrae,name=Colma minutes=79i diff --git a/plugins/parsers/json_v2/testdata/object/input.json b/plugins/parsers/json_v2/testdata/object/input.json new file mode 100644 index 0000000000000..cc8b0851f81db --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/input.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&json=y" + }, + "date": "06/03/2021", + "time": "12:54:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "13", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "43", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": 
"1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/object/telegraf.conf b/plugins/parsers/json_v2/testdata/object/telegraf.conf new file mode 100644 index 0000000000000..50ed245a3cf00 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/telegraf.conf @@ -0,0 +1,12 @@ +[[inputs.file]] + files = ["./testdata/object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "bart_json_v2" + [[inputs.file.json_v2.object]] + path = "root.station" + disable_prepend_keys = true + included_keys = ["etd_estimate_minutes"] + tags = ["name", "etd_destination"] + [inputs.file.json_v2.object.fields] + etd_estimate_minutes = "int" diff --git a/plugins/parsers/json_v2/testdata/timestamp/expected.out b/plugins/parsers/json_v2/testdata/timestamp/expected.out new file mode 100644 index 0000000000000..e2e7415171b27 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/expected.out @@ -0,0 +1,4 @@ +file,name=temperature,units=℃ value=23.4 1555745371450794118 +file,name=moisture,units=% value=5 1555745371450794118 +file,name=light,units=lux value=10118 1555745371450794118 +file,name=fertility,units=us/cm value=0 1555745371450794118 diff --git a/plugins/parsers/json_v2/testdata/timestamp/input.json b/plugins/parsers/json_v2/testdata/timestamp/input.json new file mode 100644 index 0000000000000..356d986e1f193 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/input.json @@ -0,0 +1,25 @@ +{ + "time": 1555745371410, + "measurements": [ + { + "name": "temperature", + "value": 23.4, + "units": "℃" + }, + { + "name": "moisture", + "value": 5, + "units": "%" + }, + { + "name": "light", + "value": 10118, + "units": "lux" + }, + { + "name": "fertility", + "value": 0, + "units": "us/cm" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/timestamp/telegraf.conf b/plugins/parsers/json_v2/testdata/timestamp/telegraf.conf new file mode 100644 index 0000000000000..ffea2d652ffc7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/telegraf.conf @@ -0,0 +1,11 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/timestamp/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "time" + timestamp_format = "unix_ms" + [[inputs.file.json_v2.object]] + path = "measurements" + tags = ["name", "units"] diff --git a/plugins/parsers/json_v2/testdata/types/expected.out b/plugins/parsers/json_v2/testdata/types/expected.out new file mode 100644 index 0000000000000..87cee38ee0e83 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/expected.out @@ -0,0 +1,4 @@ +file explicitstringtypeName="Bilbo",defaultstringtypeName="Baggins",convertbooltostringName="true",convertinttostringName="1",convertfloattostringName="1.1" +file defaultinttypeName=2,convertfloatointName=3i,convertstringtointName=4i,convertbooltointName=0i,explicitinttypeName=1i,uinttype=1u +file convertstringtofloatName=4.1,explicitfloattypeName=1.1,defaultfloattypeName=2.1,convertintotfloatName=3 +file 
explicitbooltypeName=true,defaultbooltypeName=false,convertinttoboolName=true,convertstringtoboolName=false,convertintstringtoboolTrueName=true,convertintstringtoboolFalseName=false
diff --git a/plugins/parsers/json_v2/testdata/types/input.json b/plugins/parsers/json_v2/testdata/types/input.json
new file mode 100644
index 0000000000000..bb85fc9eaa9e3
--- /dev/null
+++ b/plugins/parsers/json_v2/testdata/types/input.json
@@ -0,0 +1,22 @@
+{
+    "explicitstringtype": "Bilbo",
+    "defaultstringtype": "Baggins",
+    "convertbooltostring": true,
+    "convertinttostring": 1,
+    "convertfloattostring": 1.1,
+    "explicitinttype": 1,
+    "defaultinttype": 2,
+    "convertfloatoint": 3.1,
+    "convertstringtoint": "4",
+    "convertbooltoint": false,
+    "explicitfloattype": 1.1,
+    "defaultfloattype": 2.1,
+    "convertintotfloat": 3,
+    "convertstringtofloat": "4.1",
+    "explicitbooltype": true,
+    "defaultbooltype": false,
+    "convertinttobool": 1,
+    "convertstringtobool": "false",
+    "convertintstringtoboolTrue": "1",
+    "convertintstringtoboolFalse": "0"
+}
diff --git a/plugins/parsers/json_v2/testdata/types/telegraf.conf b/plugins/parsers/json_v2/testdata/types/telegraf.conf
new file mode 100644
index 0000000000000..6a23818193b9d
--- /dev/null
+++ b/plugins/parsers/json_v2/testdata/types/telegraf.conf
@@ -0,0 +1,105 @@
+# Example taken from: https://github.com/influxdata/telegraf/issues/7097
+
+# Parse string types from JSON
+[[inputs.file]]
+    files = ["./testdata/types/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.field]]
+            rename = "explicitstringtypeName"
+            path = "explicitstringtype"
+            type = "string"
+        [[inputs.file.json_v2.field]]
+            rename = "defaultstringtypeName"
+            path = "defaultstringtype"
+        [[inputs.file.json_v2.field]]
+            rename = "convertbooltostringName"
+            path = "convertbooltostring"
+            type = "string"
+        [[inputs.file.json_v2.field]]
+            rename = "convertinttostringName"
+            path = "convertinttostring"
+            type = "string"
+        [[inputs.file.json_v2.field]]
+            rename = "convertfloattostringName"
+            path = "convertfloattostring"
+            type = "string"
+
+# Parse int types from JSON
+[[inputs.file]]
+    files = ["./testdata/types/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.field]]
+            rename = "explicitinttypeName"
+            path = "explicitinttype"
+            type = "int"
+        [[inputs.file.json_v2.field]]
+            rename = "uinttype"
+            path = "explicitinttype"
+            type = "uint"
+        [[inputs.file.json_v2.field]]
+            rename = "defaultinttypeName"
+            path = "defaultinttype"
+        [[inputs.file.json_v2.field]]
+            rename = "convertfloatointName"
+            path = "convertfloatoint"
+            type = "int"
+        [[inputs.file.json_v2.field]]
+            rename = "convertstringtointName"
+            path = "convertstringtoint"
+            type = "int"
+        [[inputs.file.json_v2.field]]
+            rename = "convertbooltointName"
+            path = "convertbooltoint"
+            type = "int"
+
+# Parse float types from JSON
+[[inputs.file]]
+    files = ["./testdata/types/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.field]]
+            rename = "explicitfloattypeName"
+            path = "explicitfloattype"
+            type = "float"
+        [[inputs.file.json_v2.field]]
+            rename = "defaultfloattypeName"
+            path = "defaultfloattype"
+        [[inputs.file.json_v2.field]]
+            rename = "convertintotfloatName"
+            path = "convertintotfloat"
+            type = "float"
+        [[inputs.file.json_v2.field]]
+            rename = "convertstringtofloatName"
+            path = "convertstringtofloat"
+            type = "float"
+
+# Parse bool types from JSON
+[[inputs.file]]
+    files = ["./testdata/types/input.json"]
+    data_format =
"json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitbooltypeName" + path = "explicitbooltype" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "defaultbooltypeName" + path = "defaultbooltype" + [[inputs.file.json_v2.field]] + rename = "convertinttoboolName" + path = "convertinttobool" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertstringtoboolName" + path = "convertstringtobool" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertintstringtoboolTrueName" + path = "convertintstringtoboolTrue" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertintstringtoboolFalseName" + path = "convertintstringtoboolFalse" + type = "bool" diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go index 603dbbae862b9..01da916a2850d 100644 --- a/plugins/parsers/logfmt/parser.go +++ b/plugins/parsers/logfmt/parser.go @@ -67,10 +67,7 @@ func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) { continue } - m, err := metric.New(p.MetricName, map[string]string{}, fields, p.Now()) - if err != nil { - return nil, err - } + m := metric.New(p.MetricName, map[string]string{}, fields, p.Now()) metrics = append(metrics, m) } diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go index dfacd8c8fae0d..f2a7174891fa9 100644 --- a/plugins/parsers/logfmt/parser_test.go +++ b/plugins/parsers/logfmt/parser_test.go @@ -11,10 +11,8 @@ import ( func MustMetric(t *testing.T, m *testutil.Metric) telegraf.Metric { t.Helper() - v, err := metric.New(m.Measurement, m.Tags, m.Fields, m.Time) - if err != nil { - t.Fatal(err) - } + v := metric.New(m.Measurement, m.Tags, m.Fields, m.Time) + return v } diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index e4058852bf2e2..81e116178bf2b 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -65,10 +65,8 @@ func TryAddState(runErr error, metrics []telegraf.Metric) ([]telegraf.Metric, er f := map[string]interface{}{ "state": state, } - m, err := metric.New("nagios_state", nil, f, ts) - if err != nil { - return metrics, err - } + m := metric.New("nagios_state", nil, f, ts) + metrics = append(metrics, m) return metrics, nil } @@ -166,12 +164,8 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { fields["long_service_output"] = longmsg.String() } - m, err := metric.New("nagios_state", nil, fields, ts) - if err == nil { - metrics = append(metrics, m) - } else { - log.Printf("E! 
[parser.nagios] failed to add nagios_state: %s\n", err) - } + m := metric.New("nagios_state", nil, fields, ts) + metrics = append(metrics, m) return metrics, nil } @@ -194,7 +188,7 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er fieldName := strings.Trim(perf[1], "'") tags := map[string]string{"perfdata": fieldName} if perf[3] != "" { - str := string(perf[3]) + str := perf[3] if str != "" { tags["unit"] = str } @@ -202,10 +196,10 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er fields := make(map[string]interface{}) if perf[2] == "U" { - return nil, errors.New("Value undetermined") + return nil, errors.New("value undetermined") } - f, err := strconv.ParseFloat(string(perf[2]), 64) + f, err := strconv.ParseFloat(perf[2], 64) if err == nil { fields["value"] = f } @@ -247,12 +241,10 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er } // Create metric - metric, err := metric.New("nagios", tags, fields, timestamp) - if err != nil { - return nil, err - } + m := metric.New("nagios", tags, fields, timestamp) + // Add Metric - metrics = append(metrics, metric) + metrics = append(metrics, m) } return metrics, nil @@ -264,14 +256,14 @@ const ( MinFloat64 = 4.940656458412465441765687928682213723651e-324 // 1 / 2**(1023 - 1 + 52) ) -var ErrBadThresholdFormat = errors.New("Bad threshold format") +var ErrBadThresholdFormat = errors.New("bad threshold format") // Handles all cases from https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT func parseThreshold(threshold string) (min float64, max float64, err error) { thresh := strings.Split(threshold, ":") switch len(thresh) { case 1: - max, err = strconv.ParseFloat(string(thresh[0]), 64) + max, err = strconv.ParseFloat(thresh[0], 64) if err != nil { return 0, 0, ErrBadThresholdFormat } @@ -281,7 +273,7 @@ func parseThreshold(threshold string) (min float64, max float64, err error) { if thresh[0] == "~" { min = MinFloat64 } else { - min, err = strconv.ParseFloat(string(thresh[0]), 64) + min, err = strconv.ParseFloat(thresh[0], 64) if err != nil { min = 0 } @@ -290,7 +282,7 @@ func parseThreshold(threshold string) (min float64, max float64, err error) { if thresh[1] == "" { max = MaxFloat64 } else { - max, err = strconv.ParseFloat(string(thresh[1]), 64) + max, err = strconv.ParseFloat(thresh[1], 64) if err != nil { return 0, 0, ErrBadThresholdFormat } diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index 7f5b5937ec0c9..2173af15214ba 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -65,14 +65,6 @@ func (b *metricBuilder) n(v string) *metricBuilder { return b } -func (b *metricBuilder) t(k, v string) *metricBuilder { - if b.tags == nil { - b.tags = make(map[string]string) - } - b.tags[k] = v - return b -} - func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { if b.fields == nil { b.fields = make(map[string]interface{}) @@ -81,16 +73,8 @@ func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { return b } -func (b *metricBuilder) ts(v time.Time) *metricBuilder { - b.timestamp = v - return b -} - func (b *metricBuilder) b() telegraf.Metric { - m, err := metric.New(b.name, b.tags, b.fields, b.timestamp) - if err != nil { - panic(err) - } + m := metric.New(b.name, b.tags, b.fields, b.timestamp) return m } diff --git a/plugins/parsers/prometheus/README.md b/plugins/parsers/prometheus/README.md new file mode 100644 index 
0000000000000..931008e88696d
--- /dev/null
+++ b/plugins/parsers/prometheus/README.md
@@ -0,0 +1,17 @@
+# Prometheus Text-Based Format
+
+There are no additional configuration options for the [Prometheus Text-Based Format][]. Metrics are parsed directly into Telegraf metrics. This parser is used internally by the [prometheus input](/plugins/inputs/prometheus) and can also be used with [http_listener_v2](/plugins/inputs/http_listener_v2) to simulate a Pushgateway.
+
+[Prometheus Text-Based Format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "prometheus"
+
+```
diff --git a/plugins/parsers/prometheus/common/helpers.go b/plugins/parsers/prometheus/common/helpers.go
new file mode 100644
index 0000000000000..bc1be0339dfee
--- /dev/null
+++ b/plugins/parsers/prometheus/common/helpers.go
@@ -0,0 +1,36 @@
+package common
+
+import (
+	"github.com/influxdata/telegraf"
+	dto "github.com/prometheus/client_model/go"
+)
+
+func ValueType(mt dto.MetricType) telegraf.ValueType {
+	switch mt {
+	case dto.MetricType_COUNTER:
+		return telegraf.Counter
+	case dto.MetricType_GAUGE:
+		return telegraf.Gauge
+	case dto.MetricType_SUMMARY:
+		return telegraf.Summary
+	case dto.MetricType_HISTOGRAM:
+		return telegraf.Histogram
+	default:
+		return telegraf.Untyped
+	}
+}
+
+// MakeLabels returns the metric's labels merged with the given default tags
+func MakeLabels(m *dto.Metric, defaultTags map[string]string) map[string]string {
+	result := map[string]string{}
+
+	for key, value := range defaultTags {
+		result[key] = value
+	}
+
+	for _, lp := range m.Label {
+		result[lp.GetName()] = lp.GetValue()
+	}
+
+	return result
+}
diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go
new file mode 100644
index 0000000000000..e55789f7957b4
--- /dev/null
+++ b/plugins/parsers/prometheus/parser.go
@@ -0,0 +1,190 @@
+package prometheus
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+	"time"
+
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/plugins/parsers/prometheus/common"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+)
+
+type Parser struct {
+	DefaultTags map[string]string
+	Header      http.Header
+}
+
+func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+	var parser expfmt.TextParser
+	var metrics []telegraf.Metric
+	var err error
+	// parse even if the buffer begins with a newline
+	buf = bytes.TrimPrefix(buf, []byte("\n"))
+	// Read raw data
+	buffer := bytes.NewBuffer(buf)
+	reader := bufio.NewReader(buffer)
+
+	// Prepare output
+	metricFamilies := make(map[string]*dto.MetricFamily)
+	mediatype, params, err := mime.ParseMediaType(p.Header.Get("Content-Type"))
+	if err == nil && mediatype == "application/vnd.google.protobuf" &&
+		params["encoding"] == "delimited" &&
+		params["proto"] == "io.prometheus.client.MetricFamily" {
+		for {
+			mf := &dto.MetricFamily{}
+			if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
+				if ierr == io.EOF {
+					break
+				}
+				return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
+			}
+			metricFamilies[mf.GetName()] = mf
+		}
+	} else {
+		metricFamilies, err =
parser.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("reading text format failed: %s", err) + } + } + + now := time.Now() + + // read metrics + for metricName, mf := range metricFamilies { + for _, m := range mf.Metric { + // reading tags + tags := common.MakeLabels(m, p.DefaultTags) + + if mf.GetType() == dto.MetricType_SUMMARY { + // summary metric + telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), now) + metrics = append(metrics, telegrafMetrics...) + } else if mf.GetType() == dto.MetricType_HISTOGRAM { + // histogram metric + telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), now) + metrics = append(metrics, telegrafMetrics...) + } else { + // standard metric + // reading fields + fields := getNameAndValue(m, metricName) + // converting to telegraf metric + if len(fields) > 0 { + t := getTimestamp(m, now) + m := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) + metrics = append(metrics, m) + } + } + } + } + + return metrics, err +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, fmt.Errorf("no metrics in line") + } + + if len(metrics) > 1 { + return nil, fmt.Errorf("more than one metric in line") + } + + return metrics[0], nil +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +// Get Quantiles for summary metric & Buckets for histogram +func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + t := getTimestamp(m, now) + + fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) + met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, met) + + for _, q := range m.GetSummary().Quantile { + newTags := tags + fields = make(map[string]interface{}) + + newTags["quantile"] = fmt.Sprint(q.GetQuantile()) + fields[metricName] = float64(q.GetValue()) + + quantileMetric := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, quantileMetric) + } + return metrics +} + +// Get Buckets from histogram metric +func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + t := getTimestamp(m, now) + + fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) + + met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, met) + + for _, b := range m.GetHistogram().Bucket { + newTags := tags + fields = make(map[string]interface{}) + newTags["le"] = fmt.Sprint(b.GetUpperBound()) + fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) + + histogramMetric := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, histogramMetric) + } + return metrics +} + +// Get name and value from metric +func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} { + fields := make(map[string]interface{}) + if m.Gauge != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + 
fields[metricName] = float64(m.GetGauge().GetValue()) + } + } else if m.Counter != nil { + if !math.IsNaN(m.GetCounter().GetValue()) { + fields[metricName] = float64(m.GetCounter().GetValue()) + } + } else if m.Untyped != nil { + if !math.IsNaN(m.GetUntyped().GetValue()) { + fields[metricName] = float64(m.GetUntyped().GetValue()) + } + } + return fields +} + +func getTimestamp(m *dto.Metric, now time.Time) time.Time { + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, m.GetTimestampMs()*1000000) + } else { + t = now + } + return t +} diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go new file mode 100644 index 0000000000000..a403887e093b9 --- /dev/null +++ b/plugins/parsers/prometheus/parser_test.go @@ -0,0 +1,448 @@ +package prometheus + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +const ( + validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. +# TYPE cadvisor_version_info gauge +cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 +` + validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +# TYPE get_token_fail_count counter +get_token_fail_count 0 +` + + validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 +http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 +http_request_duration_microseconds_count{handler="prometheus"} 9 +` + + validUniqueHistogram = `# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. 
+# TYPE apiserver_request_latencies histogram
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024
+apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025
+apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08
+apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025
+`
+)
+
+func TestParsingValidGauge(t *testing.T) {
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"osVersion": "CentOS Linux 7 (Core)",
+				"cadvisorRevision": "",
+				"cadvisorVersion": "",
+				"dockerVersion": "1.8.2",
+				"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
+			},
+			map[string]interface{}{
+				"cadvisor_version_info": float64(1),
+			},
+			time.Unix(0, 0),
+			telegraf.Gauge,
+		),
+	}
+
+	metrics, err := parse([]byte(validUniqueGauge))
+
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestParsingValidCounter(t *testing.T) {
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{},
+			map[string]interface{}{
+				"get_token_fail_count": float64(0),
+			},
+			time.Unix(0, 0),
+			telegraf.Counter,
+		),
+	}
+
+	metrics, err := parse([]byte(validUniqueCounter))
+
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestParsingValidSummary(t *testing.T) {
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"handler": "prometheus",
+			},
+			map[string]interface{}{
+				"http_request_duration_microseconds_sum": float64(1.8909097205e+07),
+				"http_request_duration_microseconds_count": float64(9.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Summary,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"handler": "prometheus",
+				"quantile": "0.5",
+			},
+			map[string]interface{}{
+				"http_request_duration_microseconds": float64(552048.506),
+			},
+			time.Unix(0, 0),
+			telegraf.Summary,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"handler": "prometheus",
+				"quantile": "0.9",
+			},
+			map[string]interface{}{
+				"http_request_duration_microseconds": float64(5.876804288e+06),
+			},
+			time.Unix(0, 0),
+			telegraf.Summary,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"handler": "prometheus",
+				"quantile": "0.99",
+			},
+			map[string]interface{}{
+				"http_request_duration_microseconds": float64(5.876804288e+6),
+			},
+			time.Unix(0, 0),
+			telegraf.Summary,
+		),
+	}
+
+	metrics, err := parse([]byte(validUniqueSummary))
+
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 4)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestParsingValidHistogram(t *testing.T) {
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_count": float64(2025.0),
+				"apiserver_request_latencies_sum": float64(1.02726334e+08),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "125000",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(1994.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "250000",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(1997.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "500000",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(2000.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "1e+06",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(2005.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "2e+06",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(2012.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "4e+06",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(2017.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "8e+06",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(2024.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"verb": "POST",
+				"resource": "bindings",
+				"le": "+Inf",
+			},
+			map[string]interface{}{
+				"apiserver_request_latencies_bucket": float64(2025.0),
+			},
+			time.Unix(0, 0),
+			telegraf.Histogram,
+		),
+	}
+
+	metrics, err := parse([]byte(validUniqueHistogram))
+
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 9)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestDefaultTags(t *testing.T) {
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus",
+			map[string]string{
+				"osVersion": "CentOS Linux 7 (Core)",
+				"cadvisorRevision": "",
+				"cadvisorVersion": "",
+				"dockerVersion": "1.8.2",
+				"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
+				"defaultTag": "defaultTagValue",
+			},
+			map[string]interface{}{
+				"cadvisor_version_info": float64(1),
+			},
+			time.Unix(0, 0),
+			telegraf.Gauge,
+		),
+	}
+
+	parser := Parser{
+		DefaultTags: map[string]string{
+			"defaultTag": "defaultTagValue",
+			"dockerVersion": "to_be_overridden",
+		},
+	}
+	metrics, err := parser.Parse([]byte(validUniqueGauge))
+
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestMetricsWithTimestamp(t *testing.T) {
+	testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC)
+	testTimeUnix := testTime.UnixNano() / int64(time.Millisecond)
+	metricsWithTimestamps := fmt.Sprintf(`
+# TYPE test_counter counter
+test_counter{label="test"} 1 %d +`, testTimeUnix) + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "label": "test", + }, + map[string]interface{}{ + "test_counter": float64(1.0), + }, + testTime, + telegraf.Counter, + ), + } + + metrics, _ := parse([]byte(metricsWithTimestamps)) + + testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) +} + +func parse(buf []byte) ([]telegraf.Metric, error) { + parser := Parser{} + return parser.Parse(buf) +} + +func TestParserProtobufHeader(t *testing.T) { + var uClient = &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + }, + } + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_free": 9.77911808e+08, + }, + time.Unix(0, 0), + 2, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_in": 2.031616e+06, + }, + time.Unix(0, 0), + 1, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_out": 1.579008e+07, + }, + time.Unix(0, 0), + 1, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_total": 9.93185792e+08, + }, + time.Unix(0, 0), + 2, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_used": 1.5273984e+07, + }, + time.Unix(0, 0), + 2, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_used_percent": 1.5378778193395661, + }, + time.Unix(0, 0), + 2, + ), + } + sampleProtoBufData := []uint8{67, 10, 9, 115, 119, 97, 112, 95, 102, 114, 101, 101, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 224, 36, 205, 65, 65, 10, 7, 115, 119, 97, 112, 95, 105, 110, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 0, 63, 65, 66, 10, 8, 115, 119, 97, 112, 95, 111, 117, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 30, 110, 65, 68, 10, 10, 115, 119, 97, 112, 95, 116, 111, 116, 97, 108, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 104, 153, 205, 65, 67, 10, 9, 115, 119, 97, 112, 95, 117, 115, 101, 100, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 0, 34, 109, 65, 75, 10, 17, 115, 119, 97, 112, 95, 117, 115, 101, 100, 95, 112, 101, 114, 99, 101, 110, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 
109, 234, 180, 197, 37, 155, 248, 63}
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
+		w.Write(sampleProtoBufData)
+	}))
+	defer ts.Close()
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	if err != nil {
+		t.Fatalf("unable to create new request '%s': %s", ts.URL, err)
+	}
+	var resp *http.Response
+	resp, err = uClient.Do(req)
+	if err != nil {
+		t.Fatalf("error making HTTP request to %s: %s", ts.URL, err)
+	}
+	defer resp.Body.Close()
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("error reading body: %s", err)
+	}
+	parser := Parser{Header: resp.Header}
+	metrics, err := parser.Parse(body)
+	if err != nil {
+		t.Fatalf("error reading metrics for %s: %s", ts.URL, err)
+	}
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md
new file mode 100644
index 0000000000000..6d2c17ef898dc
--- /dev/null
+++ b/plugins/parsers/prometheusremotewrite/README.md
@@ -0,0 +1,98 @@
+# Prometheus remote write
+
+Converts Prometheus remote write samples directly into Telegraf metrics. It can be used with [http_listener_v2](/plugins/inputs/http_listener_v2). There are no additional configuration options for Prometheus remote write samples.
+
+### Configuration
+
+```toml
+[[inputs.http_listener_v2]]
+  ## Address and port to host HTTP listener on
+  service_address = ":1234"
+
+  ## Paths to listen to.
+  paths = ["/receive"]
+
+  ## Data format to consume.
+  data_format = "prometheusremotewrite"
+```
+
+### Example Input
+```
+prompb.WriteRequest{
+	Timeseries: []*prompb.TimeSeries{
+		{
+			Labels: []*prompb.Label{
+				{Name: "__name__", Value: "go_gc_duration_seconds"},
+				{Name: "instance", Value: "localhost:9090"},
+				{Name: "job", Value: "prometheus"},
+				{Name: "quantile", Value: "0.99"},
+			},
+			Samples: []prompb.Sample{
+				{Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+			},
+		},
+	},
+}
+
+```
+
+### Example Output
+```
+prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000
+```
+
+## Alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb)
+
+- Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement to the field name and the field name to `value`.
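+
+### Example usage in Go
+
+A minimal, hypothetical sketch (not part of the plugin) of feeding the payload above
+through this parser; the `prompb` types and the parser's `Parse` method mirror the
+code in this PR, while the wrapper program, its variable names, and the printed
+output are illustrative only.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/influxdata/telegraf/plugins/parsers/prometheusremotewrite"
+	"github.com/prometheus/prometheus/prompb"
+)
+
+func main() {
+	// Build the write request from the example above; the parser expects
+	// sample timestamps in milliseconds.
+	req := prompb.WriteRequest{
+		Timeseries: []prompb.TimeSeries{{
+			Labels: []prompb.Label{
+				{Name: "__name__", Value: "go_gc_duration_seconds"},
+				{Name: "instance", Value: "localhost:9090"},
+				{Name: "job", Value: "prometheus"},
+				{Name: "quantile", Value: "0.99"},
+			},
+			Samples: []prompb.Sample{{Value: 4.63, Timestamp: 1614889298859}},
+		}},
+	}
+
+	// Marshal stands in for the protobuf body that http_listener_v2 would
+	// hand to the parser.
+	buf, err := req.Marshal()
+	if err != nil {
+		panic(err)
+	}
+
+	parser := prometheusremotewrite.Parser{}
+	metrics, err := parser.Parse(buf)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, m := range metrics {
+		// Prints: prometheus_remote_write map[instance:localhost:9090 job:prometheus quantile:0.99] map[go_gc_duration_seconds:4.63]
+		fmt.Println(m.Name(), m.Tags(), m.Fields())
+	}
+}
+```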
diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go
new file mode 100644
index 0000000000000..3b9f25de28680
--- /dev/null
+++ b/plugins/parsers/prometheusremotewrite/parser.go
@@ -0,0 +1,84 @@
+package prometheusremotewrite
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/prompb"
+)
+
+type Parser struct {
+	DefaultTags map[string]string
+}
+
+func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+	var err error
+	var metrics []telegraf.Metric
+	var req prompb.WriteRequest
+
+	if err := req.Unmarshal(buf); err != nil {
+		return nil, fmt.Errorf("unable to unmarshal request body: %s", err)
+	}
+
+	now := time.Now()
+
+	for _, ts := range req.Timeseries {
+		tags := map[string]string{}
+		for key, value := range p.DefaultTags {
+			tags[key] = value
+		}
+
+		for _, l := range ts.Labels {
+			tags[l.Name] = l.Value
+		}
+
+		metricName := tags[model.MetricNameLabel]
+		if metricName == "" {
+			return nil, fmt.Errorf("metric name %q not found in tag-set or empty", model.MetricNameLabel)
+		}
+		delete(tags, model.MetricNameLabel)
+
+		for _, s := range ts.Samples {
+			fields := make(map[string]interface{})
+			if !math.IsNaN(s.Value) {
+				fields[metricName] = s.Value
+			}
+			// converting to telegraf metric
+			if len(fields) > 0 {
+				t := now
+				if s.Timestamp > 0 {
+					t = time.Unix(0, s.Timestamp*1000000)
+				}
+				m := metric.New("prometheus_remote_write", tags, fields, t)
+				metrics = append(metrics, m)
+			}
+		}
+	}
+	return metrics, err
+}
+
+func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
+	metrics, err := p.Parse([]byte(line))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(metrics) < 1 {
+		return nil, fmt.Errorf("no metrics in line")
+	}
+
+	if len(metrics) > 1 {
+		return nil, fmt.Errorf("more than one metric in line")
+	}
+
+	return metrics[0], nil
+}
+
+func (p *Parser) SetDefaultTags(tags map[string]string) {
+	p.DefaultTags = tags
+}
diff --git a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go
new file mode 100644
index 0000000000000..7417c9f5fddaf
--- /dev/null
+++ b/plugins/parsers/prometheusremotewrite/parser_test.go
@@ -0,0 +1,157 @@
+package prometheusremotewrite
+
+import (
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/prometheus/prometheus/prompb"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+	prompbInput := prompb.WriteRequest{
+		Timeseries: []prompb.TimeSeries{
+			{
+				Labels: []prompb.Label{
+					{Name: "__name__", Value: "go_gc_duration_seconds"},
+					{Name: "quantile", Value: "0.99"},
+				},
+				Samples: []prompb.Sample{
+					{Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+				},
+			},
+			{
+				Labels: []prompb.Label{
+					{Name: "__name__", Value: "prometheus_target_interval_length_seconds"},
+					{Name: "job", Value: "prometheus"},
+				},
+				Samples: []prompb.Sample{
+					{Value: 14.99, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+				},
+			},
+		},
+	}
+
+	inputBytes, err := prompbInput.Marshal()
+	assert.NoError(t, err)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus_remote_write",
+			map[string]string{
+				"quantile": "0.99",
+			},
+			map[string]interface{}{
+				"go_gc_duration_seconds": float64(4.63),
+			},
+			time.Unix(0, 0),
+		),
+		testutil.MustMetric(
+			"prometheus_remote_write",
+			map[string]string{
+				"job": "prometheus",
+			},
+			map[string]interface{}{
+				"prometheus_target_interval_length_seconds": float64(14.99),
+			},
+			time.Unix(0, 0),
+		),
+	}
+
+	parser := Parser{
+		DefaultTags: map[string]string{},
+	}
+
+	metrics, err := parser.Parse(inputBytes)
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 2)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestDefaultTags(t *testing.T) {
+	prompbInput := prompb.WriteRequest{
+		Timeseries: []prompb.TimeSeries{
+			{
+				Labels: []prompb.Label{
+					{Name: "__name__", Value: "foo"},
+					{Name: "__eg__", Value: "bar"},
+				},
+				Samples: []prompb.Sample{
+					{Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+				},
+			},
+		},
+	}
+
+	inputBytes, err := prompbInput.Marshal()
+	assert.NoError(t, err)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus_remote_write",
+			map[string]string{
+				"defaultTag": "defaultTagValue",
+				"__eg__": "bar",
+			},
+			map[string]interface{}{
+				"foo": float64(1),
+			},
+			time.Unix(0, 0),
+		),
+	}
+
+	parser := Parser{
+		DefaultTags: map[string]string{
+			"defaultTag": "defaultTagValue",
+		},
+	}
+
+	metrics, err := parser.Parse(inputBytes)
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestMetricsWithTimestamp(t *testing.T) {
+	testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC)
+	testTimeUnix := testTime.UnixNano() / int64(time.Millisecond)
+	prompbInput := prompb.WriteRequest{
+		Timeseries: []prompb.TimeSeries{
+			{
+				Labels: []prompb.Label{
+					{Name: "__name__", Value: "foo"},
+					{Name: "__eg__", Value: "bar"},
+				},
+				Samples: []prompb.Sample{
+					{Value: 1, Timestamp: testTimeUnix},
+				},
+			},
+		},
+	}
+
+	inputBytes, err := prompbInput.Marshal()
+	assert.NoError(t, err)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"prometheus_remote_write",
+			map[string]string{
+				"__eg__": "bar",
+			},
+			map[string]interface{}{
+				"foo": float64(1),
+			},
+			testTime,
+		),
+	}
+	parser := Parser{
+		DefaultTags: map[string]string{},
+	}
+
+	metrics, err := parser.Parse(inputBytes)
+	assert.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics())
+}
diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go
index 729ed048c0720..f07c789a272f1 100644
--- a/plugins/parsers/registry.go
+++ b/plugins/parsers/registry.go
@@ -12,10 +12,14 @@ import (
 	"github.com/influxdata/telegraf/plugins/parsers/grok"
 	"github.com/influxdata/telegraf/plugins/parsers/influx"
 	"github.com/influxdata/telegraf/plugins/parsers/json"
+	"github.com/influxdata/telegraf/plugins/parsers/json_v2"
 	"github.com/influxdata/telegraf/plugins/parsers/logfmt"
 	"github.com/influxdata/telegraf/plugins/parsers/nagios"
+	"github.com/influxdata/telegraf/plugins/parsers/prometheus"
+	"github.com/influxdata/telegraf/plugins/parsers/prometheusremotewrite"
 	"github.com/influxdata/telegraf/plugins/parsers/value"
 	"github.com/influxdata/telegraf/plugins/parsers/wavefront"
+	"github.com/influxdata/telegraf/plugins/parsers/xpath"
 )
 
 type ParserFunc func() (Parser, error)
@@ -48,6 +52,8 @@ type Parser interface {
 	// and parses it into a telegraf metric.
 	//
 	// Must be thread-safe.
+	// This function is only called by plugins that expect line-based protocols.
+	// Doesn't need to be implemented by non-line-based parsers (e.g.
json, xml) ParseLine(line string) (telegraf.Metric, error) // SetDefaultTags tells the parser to add all of the given tags @@ -145,9 +151,28 @@ type Config struct { CSVTimestampFormat string `toml:"csv_timestamp_format"` CSVTimezone string `toml:"csv_timezone"` CSVTrimSpace bool `toml:"csv_trim_space"` + CSVSkipValues []string `toml:"csv_skip_values"` // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + + // Value configuration + ValueFieldName string `toml:"value_field_name"` + + // XPath configuration + XPathPrintDocument bool `toml:"xpath_print_document"` + XPathProtobufFile string `toml:"xpath_protobuf_file"` + XPathProtobufType string `toml:"xpath_protobuf_type"` + XPathConfig []XPathConfig + + // JSONPath configuration + JSONV2Config []JSONV2Config `toml:"json_v2"` +} + +type XPathConfig xpath.Config + +type JSONV2Config struct { + json_v2.Config } // NewParser returns a Parser interface based on the given config. @@ -172,7 +197,7 @@ func NewParser(config *Config) (Parser, error) { ) case "value": parser, err = NewValueParser(config.MetricName, - config.DataType, config.DefaultTags) + config.DataType, config.ValueFieldName, config.DefaultTags) case "influx": parser, err = NewInfluxParser() case "nagios": @@ -221,6 +246,7 @@ func NewParser(config *Config) (Parser, error) { TimestampFormat: config.CSVTimestampFormat, Timezone: config.CSVTimezone, DefaultTags: config.DefaultTags, + SkipValues: config.CSVSkipValues, } return csv.NewParser(config) @@ -232,6 +258,21 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags, config.FormUrlencodedTagKeys, ) + case "prometheus": + parser, err = NewPrometheusParser(config.DefaultTags) + case "prometheusremotewrite": + parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) + case "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf": + parser = &xpath.Parser{ + Format: config.DataFormat, + ProtobufMessageDef: config.XPathProtobufFile, + ProtobufMessageType: config.XPathProtobufType, + PrintDocument: config.XPathPrintDocument, + DefaultTags: config.DefaultTags, + Configs: NewXPathParserConfigs(config.MetricName, config.XPathConfig), + } + case "json_v2": + parser, err = NewJSONPathParser(config.JSONV2Config) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -276,13 +317,10 @@ func NewGraphiteParser( func NewValueParser( metricName string, dataType string, + fieldName string, defaultTags map[string]string, ) (Parser, error) { - return &value.ValueParser{ - MetricName: metricName, - DataType: dataType, - DefaultTags: defaultTags, - }, nil + return value.NewValueParser(metricName, dataType, fieldName, defaultTags), nil } func NewCollectdParser( @@ -339,3 +377,46 @@ func NewFormUrlencodedParser( TagKeys: tagKeys, }, nil } + +func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { + return &prometheus.Parser{ + DefaultTags: defaultTags, + }, nil +} + +func NewPrometheusRemoteWriteParser(defaultTags map[string]string) (Parser, error) { + return &prometheusremotewrite.Parser{ + DefaultTags: defaultTags, + }, nil +} + +func NewXPathParserConfigs(metricName string, cfgs []XPathConfig) []xpath.Config { + // Convert the config formats which is a one-to-one copy + configs := make([]xpath.Config, 0, len(cfgs)) + for _, cfg := range cfgs { + config := xpath.Config(cfg) + config.MetricDefaultName = metricName + configs = append(configs, config) + } + return configs +} + +func NewJSONPathParser(jsonv2config []JSONV2Config) (Parser, error) { + configs := 
make([]json_v2.Config, len(jsonv2config)) + for i, cfg := range jsonv2config { + configs[i].MeasurementName = cfg.MeasurementName + configs[i].MeasurementNamePath = cfg.MeasurementNamePath + + configs[i].TimestampPath = cfg.TimestampPath + configs[i].TimestampFormat = cfg.TimestampFormat + configs[i].TimestampTimezone = cfg.TimestampTimezone + + configs[i].Fields = cfg.Fields + configs[i].Tags = cfg.Tags + + configs[i].JSONObjects = cfg.JSONObjects + } + return &json_v2.Parser{ + Configs: configs, + }, nil +} diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go index a495033c47b28..dc496663e98d9 100644 --- a/plugins/parsers/value/parser.go +++ b/plugins/parsers/value/parser.go @@ -15,6 +15,7 @@ type ValueParser struct { MetricName string DataType string DefaultTags map[string]string + FieldName string } func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -27,7 +28,7 @@ func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { if len(values) < 1 { return []telegraf.Metric{}, nil } - vStr = string(values[len(values)-1]) + vStr = values[len(values)-1] } var value interface{} @@ -46,14 +47,11 @@ func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { return nil, err } - fields := map[string]interface{}{"value": value} - metric, err := metric.New(v.MetricName, v.DefaultTags, + fields := map[string]interface{}{v.FieldName: value} + m := metric.New(v.MetricName, v.DefaultTags, fields, time.Now().UTC()) - if err != nil { - return nil, err - } - return []telegraf.Metric{metric}, nil + return []telegraf.Metric{m}, nil } func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { @@ -64,7 +62,7 @@ func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) < 1 { - return nil, fmt.Errorf("Can not parse the line: %s, for data format: value", line) + return nil, fmt.Errorf("can not parse the line: %s, for data format: value", line) } return metrics[0], nil @@ -73,3 +71,16 @@ func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { func (v *ValueParser) SetDefaultTags(tags map[string]string) { v.DefaultTags = tags } + +func NewValueParser(metricName, dataType, fieldName string, defaultTags map[string]string) *ValueParser { + if fieldName == "" { + fieldName = "value" + } + + return &ValueParser{ + MetricName: metricName, + DataType: dataType, + DefaultTags: defaultTags, + FieldName: fieldName, + } +} diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go index 667fb108cfbfb..5a74085d82980 100644 --- a/plugins/parsers/value/parser_test.go +++ b/plugins/parsers/value/parser_test.go @@ -7,10 +7,7 @@ import ( ) func TestParseValidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -20,10 +17,7 @@ func TestParseValidValues(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) metrics, err = parser.Parse([]byte("64")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -33,10 +27,7 @@ func TestParseValidValues(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: 
"string", - } + parser = NewValueParser("value_test", "string", "", nil) metrics, err = parser.Parse([]byte("foobar")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -46,10 +37,7 @@ func TestParseValidValues(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) metrics, err = parser.Parse([]byte("true")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -61,10 +49,7 @@ func TestParseValidValues(t *testing.T) { } func TestParseMultipleValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte(`55 45 223 @@ -80,11 +65,19 @@ func TestParseMultipleValues(t *testing.T) { assert.Equal(t, map[string]string{}, metrics[0].Tags()) } +func TestParseCustomFieldName(t *testing.T) { + parser := NewValueParser("value_test", "integer", "", nil) + parser.FieldName = "penguin" + metrics, err := parser.Parse([]byte(`55`)) + + assert.NoError(t, err) + assert.Equal(t, map[string]interface{}{ + "penguin": int64(55), + }, metrics[0].Fields()) +} + func TestParseLineValidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metric, err := parser.ParseLine("55") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -93,10 +86,7 @@ func TestParseLineValidValues(t *testing.T) { }, metric.Fields()) assert.Equal(t, map[string]string{}, metric.Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) metric, err = parser.ParseLine("64") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -105,10 +95,7 @@ func TestParseLineValidValues(t *testing.T) { }, metric.Fields()) assert.Equal(t, map[string]string{}, metric.Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "string", - } + parser = NewValueParser("value_test", "string", "", nil) metric, err = parser.ParseLine("foobar") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -117,10 +104,7 @@ func TestParseLineValidValues(t *testing.T) { }, metric.Fields()) assert.Equal(t, map[string]string{}, metric.Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) metric, err = parser.ParseLine("true") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -131,59 +115,38 @@ func TestParseLineValidValues(t *testing.T) { } func TestParseInvalidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55.0")) assert.Error(t, err) assert.Len(t, metrics, 0) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) metrics, err = parser.Parse([]byte("foobar")) assert.Error(t, err) assert.Len(t, metrics, 0) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) metrics, err = parser.Parse([]byte("213")) assert.Error(t, err) assert.Len(t, metrics, 0) } func TestParseLineInvalidValues(t 
*testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) _, err := parser.ParseLine("55.0") assert.Error(t, err) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) _, err = parser.ParseLine("foobar") assert.Error(t, err) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) _, err = parser.ParseLine("213") assert.Error(t, err) } func TestParseValidValuesDefaultTags(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err := parser.Parse([]byte("55")) assert.NoError(t, err) @@ -194,10 +157,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("64")) assert.NoError(t, err) @@ -208,10 +168,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "string", - } + parser = NewValueParser("value_test", "string", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("foobar")) assert.NoError(t, err) @@ -222,10 +179,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("true")) assert.NoError(t, err) @@ -238,10 +192,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { } func TestParseValuesWithNullCharacter(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55\x00")) assert.NoError(t, err) assert.Len(t, metrics, 1) diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 5ed37645cdd74..4afa199663733 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -30,9 +30,6 @@ type LoopedParser struct { wrappedParser ElementParser wsParser *WhiteSpaceParser } -type LiteralParser struct { - literal string -} func (ep *NameParser) parse(p *PointParser, pt *Point) error { //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). 
@@ -55,12 +52,12 @@ func (ep *ValueParser) parse(p *PointParser, pt *Point) error { } p.writeBuf.Reset() - if tok == MINUS_SIGN { + if tok == MinusSign { p.writeBuf.WriteString(lit) tok, lit = p.scan() } - for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT || tok == MINUS_SIGN) { + for tok != EOF && (tok == Letter || tok == Number || tok == Dot || tok == MinusSign) { p.writeBuf.WriteString(lit) tok, lit = p.scan() } @@ -84,7 +81,7 @@ func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { return fmt.Errorf("found %q, expected number", lit) } - if tok != NUMBER { + if tok != Number { if ep.optional { p.unscanTokens(2) return setTimestamp(pt, 0, 1) @@ -93,7 +90,7 @@ func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { } p.writeBuf.Reset() - for tok != EOF && tok == NUMBER { + for tok != EOF && tok == Number { p.writeBuf.WriteString(lit) tok, lit = p.scan() } @@ -108,7 +105,6 @@ func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { } func setTimestamp(pt *Point, ts int64, numDigits int) error { - if numDigits == 19 { // nanoseconds ts = ts / 1e9 @@ -154,7 +150,7 @@ func (ep *TagParser) parse(p *PointParser, pt *Point) error { } next, lit := p.scan() - if next != EQUALS { + if next != Equals { return fmt.Errorf("found %q, expected equals", lit) } @@ -169,9 +165,9 @@ func (ep *TagParser) parse(p *PointParser, pt *Point) error { return nil } -func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { - tok := WS - for tok != EOF && tok == WS { +func (ep *WhiteSpaceParser) parse(p *PointParser, _ *Point) error { + tok := Ws + for tok != EOF && tok == Ws { tok, _ = p.scan() } @@ -185,26 +181,14 @@ func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { return nil } -func (ep *LiteralParser) parse(p *PointParser, pt *Point) error { - l, err := parseLiteral(p) - if err != nil { - return err - } - - if l != ep.literal { - return fmt.Errorf("found %s, expected %s", l, ep.literal) - } - return nil -} - func parseQuotedLiteral(p *PointParser) (string, error) { p.writeBuf.Reset() escaped := false tok, lit := p.scan() - for tok != EOF && (tok != QUOTES || (tok == QUOTES && escaped)) { + for tok != EOF && (tok != Quotes || (tok == Quotes && escaped)) { // let everything through - escaped = tok == BACKSLASH + escaped = tok == Backslash p.writeBuf.WriteString(lit) tok, lit = p.scan() } @@ -220,19 +204,19 @@ func parseLiteral(p *PointParser) (string, error) { return "", fmt.Errorf("found %q, expected literal", lit) } - if tok == QUOTES { + if tok == Quotes { return parseQuotedLiteral(p) } p.writeBuf.Reset() - for tok != EOF && tok > literal_beg && tok < literal_end { + for tok != EOF && tok > literalBeg && tok < literalEnd { p.writeBuf.WriteString(lit) tok, lit = p.scan() - if tok == DELTA { + if tok == Delta { return "", errors.New("found delta inside metric name") } } - if tok == QUOTES { + if tok == Quotes { return "", errors.New("found quote inside unquoted literal") } p.unscan() diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index 7ae455d47dbbd..ad3e704c58390 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf/metric" ) -const MAX_BUFFER_SIZE = 2 +const MaxBufferSize = 2 type Point struct { Name string @@ -90,7 +90,6 @@ func (p *WavefrontParser) Parse(buf []byte) ([]telegraf.Metric, error) { } func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) { - // parse even if the buffer 
begins with a newline buf = bytes.TrimPrefix(buf, []byte("\n")) // add newline to end if not exists: @@ -133,7 +132,6 @@ func (p *WavefrontParser) SetDefaultTags(tags map[string]string) { } func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) for _, point := range points { @@ -154,10 +152,7 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M } fields["value"] = v - m, err := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0)) - if err != nil { - return nil, err - } + m := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0)) metrics = append(metrics, m) } @@ -170,9 +165,9 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M func (p *PointParser) scan() (Token, string) { // If we have a token on the buffer, then return it. if p.buf.n != 0 { - idx := p.buf.n % MAX_BUFFER_SIZE + idx := p.buf.n % MaxBufferSize tok, lit := p.buf.tok[idx], p.buf.lit[idx] - p.buf.n -= 1 + p.buf.n-- return tok, lit } @@ -188,8 +183,8 @@ func (p *PointParser) scan() (Token, string) { func (p *PointParser) buffer(tok Token, lit string) { // create the buffer if it is empty if len(p.buf.tok) == 0 { - p.buf.tok = make([]Token, MAX_BUFFER_SIZE) - p.buf.lit = make([]string, MAX_BUFFER_SIZE) + p.buf.tok = make([]Token, MaxBufferSize) + p.buf.lit = make([]string, MaxBufferSize) } // for now assume a simple circular buffer of length two @@ -203,15 +198,14 @@ func (p *PointParser) unscan() { } func (p *PointParser) unscanTokens(n int) { - if n > MAX_BUFFER_SIZE { + if n > MaxBufferSize { // just log for now - log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE) + log.Printf("cannot unscan more than %d tokens", MaxBufferSize) } p.buf.n += n } func (p *PointParser) reset(buf []byte) { - // reset the scan buffer and write new byte p.scanBuf.Reset() p.scanBuf.Write(buf) diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index fed31b5f247b5..0165b499946e0 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -14,79 +14,66 @@ func TestParse(t *testing.T) { parsedMetrics, err := parser.Parse([]byte("test.metric 1")) assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) assert.NoError(t, err) - testMetric, err = metric.New("\u2206test.delta", map[string]string{}, + testMetric = metric.New("\u2206test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) assert.NoError(t, err) - testMetric, err = metric.New("\u0394test.delta", map[string]string{}, + testMetric = metric.New("\u0394test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" 
tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) 
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) - } func TestParseLine(t *testing.T) { @@ -94,39 +81,33 @@ func TestParseLine(t *testing.T) { parsedMetric, err := parser.ParseLine("test.metric 1") assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) assert.Equal(t, parsedMetric.Name(), testMetric.Name()) assert.Equal(t, parsedMetric.Fields(), testMetric.Fields()) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) 
assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) } @@ -135,10 +116,8 @@ func TestParseMultiple(t *testing.T) { parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) assert.NoError(t, err) - testMetric1, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.NoError(t, err) - testMetric2, err := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) testMetrics := []telegraf.Metric{testMetric1, testMetric2} assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name()) assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) @@ -146,33 +125,25 @@ func TestParseMultiple(t *testing.T) { parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) assert.NoError(t, err) - testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2} assert.EqualValues(t, parsedMetrics, testMetrics) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) assert.NoError(t, err) - testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2} assert.EqualValues(t, parsedMetrics, testMetrics) parsedMetrics, err = 
parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit")) assert.NoError(t, err) - testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric3, err := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + testMetric3 := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3} assert.EqualValues(t, parsedMetrics, testMetrics) - } func TestParseSpecial(t *testing.T) { @@ -180,16 +151,13 @@ func TestParseSpecial(t *testing.T) { parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936") assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) - } func TestParseInvalid(t *testing.T) { @@ -221,7 +189,6 @@ func TestParseInvalid(t *testing.T) { _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.Error(t, err) - } func TestParseDefaultTags(t *testing.T) { @@ -229,20 +196,16 @@ func TestParseDefaultTags(t *testing.T) { parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936")) assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": 
"test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\"")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) - } diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go index a528f72ee52cd..70558e604dbb8 100644 --- a/plugins/parsers/wavefront/scanner.go +++ b/plugins/parsers/wavefront/scanner.go @@ -24,24 +24,18 @@ func (s *PointScanner) read() rune { return ch } -// unread places the previously read rune back on the reader. -func (s *PointScanner) unread() { - _ = s.r.UnreadRune() -} - // Scan returns the next token and literal value. func (s *PointScanner) Scan() (Token, string) { - // Read the next rune ch := s.read() if isWhitespace(ch) { - return WS, string(ch) + return Ws, string(ch) } else if isLetter(ch) { - return LETTER, string(ch) + return Letter, string(ch) } else if isNumber(ch) { - return NUMBER, string(ch) + return Number, string(ch) } else if isDelta(ch) { - return DELTA, string(ch) + return Delta, string(ch) } // Otherwise read the individual character. @@ -49,23 +43,23 @@ func (s *PointScanner) Scan() (Token, string) { case eof: return EOF, "" case '\n': - return NEWLINE, string(ch) + return Newline, string(ch) case '.': - return DOT, string(ch) + return Dot, string(ch) case '-': - return MINUS_SIGN, string(ch) + return MinusSign, string(ch) case '_': - return UNDERSCORE, string(ch) + return Underscore, string(ch) case '/': - return SLASH, string(ch) + return Slash, string(ch) case '\\': - return BACKSLASH, string(ch) + return Backslash, string(ch) case ',': - return COMMA, string(ch) + return Comma, string(ch) case '"': - return QUOTES, string(ch) + return Quotes, string(ch) case '=': - return EQUALS, string(ch) + return Equals, string(ch) } - return ILLEGAL, string(ch) + return Illegal, string(ch) } diff --git a/plugins/parsers/wavefront/token.go b/plugins/parsers/wavefront/token.go index 5b77d0cdbb69b..68619e21bb6c4 100644 --- a/plugins/parsers/wavefront/token.go +++ b/plugins/parsers/wavefront/token.go @@ -4,27 +4,27 @@ type Token int const ( // Special tokens - ILLEGAL Token = iota + Illegal Token = iota EOF - WS + Ws // Literals - literal_beg - LETTER // metric name, source/point tags - NUMBER - MINUS_SIGN - UNDERSCORE - DOT - SLASH - BACKSLASH - COMMA - DELTA - literal_end + literalBeg + Letter // metric name, source/point tags + Number + MinusSign + Underscore + Dot + Slash + Backslash + Comma + Delta + literalEnd // Misc characters - QUOTES - EQUALS - NEWLINE + Quotes + Equals + Newline ) func isWhitespace(ch rune) bool { diff --git a/plugins/parsers/xpath/README.md b/plugins/parsers/xpath/README.md new file mode 100644 index 0000000000000..09823bbacf982 --- /dev/null +++ b/plugins/parsers/xpath/README.md @@ -0,0 +1,384 @@ +# XPath + +The XPath data format parser parses different formats into metric fields using [XPath][xpath] expressions. + +For supported XPath functions check [the underlying XPath library][xpath lib]. 
+ +**NOTE:** The types of fields are specified using [XPath functions][xpath lib]. The only exception is *integer* fields, which need to be specified in a `fields_int` section. + +### Supported data formats +| name | `data_format` setting | comment | +| --------------------------------------- | --------------------- | ------- | +| [Extensible Markup Language (XML)][xml] | `"xml"` | | +| [JSON][json] | `"xpath_json"` | | +| [MessagePack][msgpack] | `"xpath_msgpack"` | | +| [Protocol buffers][protobuf] | `"xpath_protobuf"` | [see additional parameters](#protocol-buffers-additional-settings)| + +#### Protocol buffers additional settings +To use the protocol-buffer format you need to specify a protocol buffer definition file (`.proto`) in `xpath_protobuf_file`. Furthermore, you need to specify which message type you want to use via `xpath_protobuf_type`. + +### Configuration (explicit) +In this configuration mode, you explicitly specify the fields and tags you want to scrape out of your data. +```toml +[[inputs.file]] + files = ["example.xml"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "xml" + + ## PROTOCOL BUFFER definitions + ## Protocol buffer definition file + # xpath_protobuf_file = "sparkplug_b.proto" + ## Name of the protocol buffer message type to use in a fully qualified form. + # xpath_protobuf_type = "org.eclipse.tahu.protobuf.Payload" + + ## Print the internal XML document when in debug logging mode. + ## This is especially useful when using the parser with non-XML formats like protocol buffers + ## to get an idea on the expression necessary to derive fields etc. + # xpath_print_document = false + + ## Multiple parsing sections are allowed + [[inputs.file.xpath]] + ## Optional: XPath-query to select a subset of nodes from the XML document. + # metric_selection = "/Bus/child::Sensor" + + ## Optional: XPath-query to set the metric (measurement) name. + # metric_name = "string('example')" + + ## Optional: Query to extract metric timestamp. + ## If not specified the time of execution is used. + # timestamp = "/Gateway/Timestamp" + ## Optional: Format of the timestamp determined by the query above. + ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang + ## time format. If not specified, a "unix" timestamp (in seconds) is expected. + # timestamp_format = "2006-01-02T15:04:05Z" + + ## Tag definitions using the given XPath queries. + [inputs.file.xpath.tags] + name = "substring-after(Sensor/@name, ' ')" + device = "string('the ultimate sensor')" + + ## Integer field definitions using XPath queries. + [inputs.file.xpath.fields_int] + consumers = "Variable/@consumers" + + ## Non-integer field definitions using XPath queries. + ## The field type is defined using XPath expressions such as number(), boolean() or string(). If no conversion is performed the field will be of type string. + [inputs.file.xpath.fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'ok'" +``` + +A configuration can contain multiple *xpath* subsections, e.g. for the file plugin to process the XML string multiple times. Consult the [XPath syntax][xpath] and the [underlying library's functions][xpath lib] for details and help regarding XPath queries.
Consider using an XPath tester such as [xpather.com][xpather] or [Code Beautify's XPath Tester][xpath tester] for help developing and debugging +your query. + +### Configuration (batch) + +As an alternative to the configuration above, fields can also be specified in batch. Instead of specifying each field +in its own section, you can define a `name` and a `value` selector used to determine the name and value of the fields in the +metric. +```toml +[[inputs.file]] + files = ["example.xml"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "xml" + + ## Name of the protocol buffer type to use. + ## This is only relevant when parsing protocol buffers and must contain the fully qualified + ## name of the type e.g. "org.eclipse.tahu.protobuf.Payload". + # xpath_protobuf_type = "" + + ## Print the internal XML document when in debug logging mode. + ## This is especially useful when using the parser with non-XML formats like protocol buffers + ## to get an idea on the expression necessary to derive fields etc. + # xpath_print_document = false + + ## Multiple parsing sections are allowed + [[inputs.file.xpath]] + ## Optional: XPath-query to select a subset of nodes from the XML document. + metric_selection = "/Bus/child::Sensor" + + ## Optional: XPath-query to set the metric (measurement) name. + # metric_name = "string('example')" + + ## Optional: Query to extract metric timestamp. + ## If not specified the time of execution is used. + # timestamp = "/Gateway/Timestamp" + ## Optional: Format of the timestamp determined by the query above. + ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang + ## time format. If not specified, a "unix" timestamp (in seconds) is expected. + # timestamp_format = "2006-01-02T15:04:05Z" + + ## Field specifications using a selector. + field_selection = "child::*" + ## Optional: Queries to specify field name and value. + ## These options are only to be used in combination with 'field_selection'! + ## By default the node name and node content is used if a field-selection + ## is specified. + # field_name = "name()" + # field_value = "." + + ## Optional: Expand field names relative to the selected node + ## This allows flattening out nodes with non-unique names in the subtree + # field_name_expansion = false + + ## Tag definitions using the given XPath queries. + [inputs.file.xpath.tags] + name = "substring-after(Sensor/@name, ' ')" + device = "string('the ultimate sensor')" + +``` +*Please note*: The resulting fields are _always_ of type string! + +It is also possible to specify a mixture of the two alternative ways of specifying fields. + +#### metric_selection (optional) + +You can specify an [XPath][xpath] query to select a subset of nodes from the XML document, each used to generate a new +metric with the specified fields, tags etc. + +Subsequent relative queries are evaluated relative to the nodes selected by `metric_selection`. To specify absolute paths, start the query with a slash (`/`). + +Specifying `metric_selection` is optional. If not specified, all relative queries are relative to the root node of the XML document. + +#### metric_name (optional) + +By specifying `metric_name` you can override the metric/measurement name with the result of the given [XPath][xpath] query. If not specified, the default metric name is used.
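To make the distinction between relative and absolute queries concrete, here is a hypothetical fragment reusing node names from the `example.xml` document shown further below:

```toml
[[inputs.file.xpath]]
  metric_selection = "/Bus/child::Sensor"

  [inputs.file.xpath.tags]
    ## absolute query: starts with a slash, evaluated from the document root
    gateway = "string(/Gateway/Name)"

  [inputs.file.xpath.fields]
    ## relative query: evaluated against each selected Sensor node
    temperature = "number(Variable/@temperature)"
```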
+ +#### timestamp, timestamp_format (optional) + +By default the current time will be used for all created metrics. To set the time from values in the XML document you can specify an [XPath][xpath] query in `timestamp` and set the format in `timestamp_format`. + +The `timestamp_format` can be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +an accepted [Go "reference time"][time const]. Consult the Go [time][time parse] package for details and additional examples on how to set the time format. +If `timestamp_format` is omitted, the result of the `timestamp` query is assumed to be a `unix` timestamp (in seconds). + +#### tags sub-section + +[XPath][xpath] queries in the `tag name = query` format to add tags to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. + +**NOTE:** Results of tag-queries will always be converted to strings. + +#### fields_int sub-section + +[XPath][xpath] queries in the `field name = query` format to add integer typed fields to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. + +**NOTE:** Results of field_int-queries will always be converted to **int64**. The conversion will fail if the query result is not convertible! + +#### fields sub-section + +[XPath][xpath] queries in the `field name = query` format to add non-integer fields to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. + +The type of the field is specified in the [XPath][xpath] query using the type conversion functions of XPath such as `number()`, `boolean()` or `string()`. +If no conversion is performed in the query, the field will be of type string. + +**NOTE: Path conversion functions will always succeed even if you convert text to a float!** + + +#### field_selection, field_name, field_value (optional) + +You can specify an [XPath][xpath] query to select a set of nodes forming the fields of the metric. The specified path can be absolute (starting with `/`) or relative to the currently selected node. Each node selected by `field_selection` forms a new field within the metric. + +The *name* and the *value* of each field can be specified using the optional `field_name` and `field_value` queries. The queries are relative to the selected field if not starting with `/`. If not specified, the field's *name* defaults to the node name and the field's *value* defaults to the content of the selected field node. +**NOTE**: `field_name` and `field_value` queries are only evaluated if a `field_selection` is specified. + +Specifying `field_selection` is optional. This is an alternative way to specify fields, especially for documents where the node names are not known a priori or where a large number of fields would have to be specified. These options can also be combined with the field specifications above. + +**NOTE: Path conversion functions will always succeed even if you convert text to a float!** + +#### field_name_expansion (optional) + +When *true*, field names selected with `field_selection` are expanded to a *path* relative to the *selected node*. This +is necessary if you e.g. select all leaf nodes as fields and those leaf nodes do not have unique names. That is, if the fields +you select have duplicate names, you should set this to `true` (see the sketch below).
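As a hypothetical sketch of the effect (the underscore separator is an assumption based on the path-building helper added in this change):

```
<!-- selected node: Sensor, with field_selection = "//Max" -->
<Sensor>
  <Temperatures><Max>25.1</Max></Temperatures>
  <Powers><Max>50.0</Max></Powers>
</Sensor>

field_name_expansion = false  -->  two fields both named "Max" (they collide)
field_name_expansion = true   -->  fields "Temperatures_Max" and "Powers_Max"
```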
+ +### Examples + +This `example.xml` file is used in the configuration examples below: +```xml +<?xml version="1.0"?> +<Gateway> + <Name>Main Gateway</Name> + <Timestamp>2020-08-01T15:04:03Z</Timestamp> + <Sequence>12</Sequence> + <Status>ok</Status> +</Gateway> + +<Bus> + <Sensor name="Sensor Facility A"> + <Variable temperature="20.0"/> + <Variable power="123.4"/> + <Variable frequency="49.78"/> + <Variable consumers="3"/> + <Mode>busy</Mode> + </Sensor> + <Sensor name="Sensor Facility B"> + <Variable temperature="23.1"/> + <Variable power="14.3"/> + <Variable frequency="49.78"/> + <Variable consumers="1"/> + <Mode>standby</Mode> + </Sensor> + <Sensor name="Sensor Facility C"> + <Variable temperature="19.7"/> + <Variable power="0.02"/> + <Variable frequency="49.78"/> + <Variable consumers="0"/> + <Mode>error</Mode> + </Sensor> +</Bus> +``` + +#### Basic Parsing + +This example shows the basic usage of the XML parser. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xpath]] + [inputs.file.xpath.tags] + gateway = "substring-before(/Gateway/Name, ' ')" + + [inputs.file.xpath.fields_int] + seqnr = "/Gateway/Sequence" + + [inputs.file.xpath.fields] + ok = "/Gateway/Status = 'ok'" +``` + +Output: +``` +file,gateway=Main,host=Hugin seqnr=12i,ok=true 1598610830000000000 +``` + +In the *tags* definition the XPath function `substring-before()` is used to extract only the sub-string before the space. To get the integer value of `/Gateway/Sequence` we have to use the *fields_int* section as there is no XPath expression to convert node values to integers (only float). +The `ok` field is filled with a boolean by specifying a query comparing the query result of `/Gateway/Status` with the string *ok*. Use the type conversions available in the XPath syntax to specify field types. + +#### Time and metric names + +This example uses the time and the name of the metric from the XML document itself. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xpath]] + metric_name = "name(/Gateway/Status)" + + timestamp = "/Gateway/Timestamp" + timestamp_format = "2006-01-02T15:04:05Z" + + [inputs.file.xpath.tags] + gateway = "substring-before(/Gateway/Name, ' ')" + + [inputs.file.xpath.fields] + ok = "/Gateway/Status = 'ok'" +``` + +Output: +``` +Status,gateway=Main,host=Hugin ok=true 1596294243000000000 +``` +In addition to the basic parsing example, the metric name is defined as the name of the `/Gateway/Status` node and the timestamp is derived from the XML document instead of using the execution time. + +#### Multi-node selection + +For XML documents containing metrics for e.g. multiple devices (like `Sensor`s in the *example.xml*), multiple metrics can be generated using node selection. This example shows how to generate a metric for each *Sensor* in the example. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xpath]] + metric_selection = "/Bus/child::Sensor" + + metric_name = "string('sensors')" + + timestamp = "/Gateway/Timestamp" + timestamp_format = "2006-01-02T15:04:05Z" + + [inputs.file.xpath.tags] + name = "substring-after(@name, ' ')" + + [inputs.file.xpath.fields_int] + consumers = "Variable/@consumers" + + [inputs.file.xpath.fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'error'" + +``` + +Output: +``` +sensors,host=Hugin,name=Facility\ A consumers=3i,frequency=49.78,ok=true,power=123.4,temperature=20 1596294243000000000 +sensors,host=Hugin,name=Facility\ B consumers=1i,frequency=49.78,ok=true,power=14.3,temperature=23.1 1596294243000000000 +sensors,host=Hugin,name=Facility\ C consumers=0i,frequency=49.78,ok=false,power=0.02,temperature=19.7 1596294243000000000 +``` + +Using the `metric_selection` option we select all `Sensor` nodes in the XML document. Please note that all field and tag definitions are relative to these selected nodes. An exception is the timestamp definition which is relative to the root node of the XML document.
+ +#### Batch field processing with multi-node selection + +For XML documents containing metrics with a large number of fields or where the fields are not known beforehand (e.g. an unknown set of `Variable` nodes in the *example.xml*), field selectors can be used. This example shows how to generate a metric for each *Sensor* in the example with fields derived from the *Variable* nodes. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xpath]] + metric_selection = "/Bus/child::Sensor" + metric_name = "string('sensors')" + + timestamp = "/Gateway/Timestamp" + timestamp_format = "2006-01-02T15:04:05Z" + + field_selection = "child::Variable" + field_name = "name(@*[1])" + field_value = "number(@*[1])" + + [inputs.file.xpath.tags] + name = "substring-after(@name, ' ')" +``` + +Output: +``` +sensors,host=Hugin,name=Facility\ A consumers=3,frequency=49.78,power=123.4,temperature=20 1596294243000000000 +sensors,host=Hugin,name=Facility\ B consumers=1,frequency=49.78,power=14.3,temperature=23.1 1596294243000000000 +sensors,host=Hugin,name=Facility\ C consumers=0,frequency=49.78,power=0.02,temperature=19.7 1596294243000000000 +``` + +Using the `metric_selection` option we select all `Sensor` nodes in the XML document. For each *Sensor* we then use `field_selection` to select all child nodes of the sensor as *field-nodes*. Please note that the field selection is relative to the selected nodes. +For each selected *field-node* we use `field_name` and `field_value` to determine the field's name and value, respectively. The `field_name` query takes the name of the node's first attribute, while `field_value` takes the value of the first attribute and converts the result to a number. + +[xpath lib]: https://github.com/antchfx/xpath +[json]: https://www.json.org/ +[msgpack]: https://msgpack.org/ +[protobuf]: https://developers.google.com/protocol-buffers +[xml]: https://www.w3.org/XML/ +[xpath]: https://www.w3.org/TR/xpath/ +[xpather]: http://xpather.com/ +[xpath tester]: https://codebeautify.org/Xpath-Tester +[time const]: https://golang.org/pkg/time/#pkg-constants +[time parse]: https://golang.org/pkg/time/#Parse diff --git a/plugins/parsers/xpath/json_document.go b/plugins/parsers/xpath/json_document.go new file mode 100644 index 0000000000000..155ed6335bbfe --- /dev/null +++ b/plugins/parsers/xpath/json_document.go @@ -0,0 +1,65 @@ +package xpath + +import ( + "strings" + + "github.com/antchfx/jsonquery" + path "github.com/antchfx/xpath" +) + +type jsonDocument struct{} + +func (d *jsonDocument) Parse(buf []byte) (dataNode, error) { + return jsonquery.Parse(strings.NewReader(string(buf))) +} + +func (d *jsonDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + // If this panics it's a programming error as we changed the document type while processing + native, err := jsonquery.QueryAll(node.(*jsonquery.Node), expr) + if err != nil { + return nil, err + } + + nodes := make([]dataNode, len(native)) + for i, n := range native { + nodes[i] = n + } + return nodes, nil + } + +func (d *jsonDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + // If this panics it's a programming error as we changed the document type while processing + return jsonquery.CreateXPathNavigator(node.(*jsonquery.Node)) +} + +func (d *jsonDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + names := make([]string, 0) + + // If these panic it's a programming error as we changed the document type while processing + nativeNode :=
node.(*jsonquery.Node) + nativeRelativeTo := relativeTo.(*jsonquery.Node) + + // Climb up the tree and collect the node names + n := nativeNode.Parent + for n != nil && n != nativeRelativeTo { + names = append(names, n.Data) + n = n.Parent + } + + if len(names) < 1 { + return "" + } + + // Construct the nodes + nodepath := "" + for _, name := range names { + nodepath = name + sep + nodepath + } + + return nodepath[:len(nodepath)-1] +} + +func (d *jsonDocument) OutputXML(node dataNode) string { + native := node.(*jsonquery.Node) + return native.OutputXML() +} diff --git a/plugins/parsers/xpath/msgpack_document.go b/plugins/parsers/xpath/msgpack_document.go new file mode 100644 index 0000000000000..6f5102deefdf4 --- /dev/null +++ b/plugins/parsers/xpath/msgpack_document.go @@ -0,0 +1,39 @@ +package xpath + +import ( + "bytes" + "fmt" + + "github.com/tinylib/msgp/msgp" + + "github.com/antchfx/jsonquery" + path "github.com/antchfx/xpath" +) + +type msgpackDocument jsonDocument + +func (d *msgpackDocument) Parse(buf []byte) (dataNode, error) { + var json bytes.Buffer + + // Unmarshal the message-pack binary message to JSON and proceed with the jsonquery class + if _, err := msgp.UnmarshalAsJSON(&json, buf); err != nil { + return nil, fmt.Errorf("unmarshalling to json failed: %v", err) + } + return jsonquery.Parse(&json) +} + +func (d *msgpackDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + return (*jsonDocument)(d).QueryAll(node, expr) +} + +func (d *msgpackDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + return (*jsonDocument)(d).CreateXPathNavigator(node) +} + +func (d *msgpackDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + return (*jsonDocument)(d).GetNodePath(node, relativeTo, sep) +} + +func (d *msgpackDocument) OutputXML(node dataNode) string { + return (*jsonDocument)(d).OutputXML(node) +} diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go new file mode 100644 index 0000000000000..75ebfd92035c1 --- /dev/null +++ b/plugins/parsers/xpath/parser.go @@ -0,0 +1,456 @@ +package xpath + +import ( + "fmt" + "strconv" + "strings" + "time" + + path "github.com/antchfx/xpath" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type dataNode interface{} + +type dataDocument interface { + Parse(buf []byte) (dataNode, error) + QueryAll(node dataNode, expr string) ([]dataNode, error) + CreateXPathNavigator(node dataNode) path.NodeNavigator + GetNodePath(node, relativeTo dataNode, sep string) string + OutputXML(node dataNode) string +} + +type Parser struct { + Format string + ProtobufMessageDef string + ProtobufMessageType string + PrintDocument bool + Configs []Config + DefaultTags map[string]string + Log telegraf.Logger + + document dataDocument +} + +type Config struct { + MetricDefaultName string `toml:"-"` + MetricQuery string `toml:"metric_name"` + Selection string `toml:"metric_selection"` + Timestamp string `toml:"timestamp"` + TimestampFmt string `toml:"timestamp_format"` + Tags map[string]string `toml:"tags"` + Fields map[string]string `toml:"fields"` + FieldsInt map[string]string `toml:"fields_int"` + + FieldSelection string `toml:"field_selection"` + FieldNameQuery string `toml:"field_name"` + FieldValueQuery string `toml:"field_value"` + FieldNameExpand bool `toml:"field_name_expansion"` +} + +func (p *Parser) Init() error { + switch p.Format { + case "", "xml": + p.document = &xmlDocument{} + case "xpath_json": + p.document = &jsonDocument{} + case 
"xpath_msgpack": + p.document = &msgpackDocument{} + case "xpath_protobuf": + pbdoc := protobufDocument{ + MessageDefinition: p.ProtobufMessageDef, + MessageType: p.ProtobufMessageType, + Log: p.Log, + } + if err := pbdoc.Init(); err != nil { + return err + } + p.document = &pbdoc + default: + return fmt.Errorf("unknown data-format %q for xpath parser", p.Format) + } + + return nil +} + +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + t := time.Now() + + // Parse the XML + doc, err := p.document.Parse(buf) + if err != nil { + return nil, err + } + if p.PrintDocument { + p.Log.Debugf("XML document equivalent: %q", p.document.OutputXML(doc)) + } + + // Queries + metrics := make([]telegraf.Metric, 0) + for _, config := range p.Configs { + if len(config.Selection) == 0 { + config.Selection = "/" + } + selectedNodes, err := p.document.QueryAll(doc, config.Selection) + if err != nil { + return nil, err + } + if len(selectedNodes) < 1 || selectedNodes[0] == nil { + p.debugEmptyQuery("metric selection", doc, config.Selection) + return nil, fmt.Errorf("cannot parse with empty selection node") + } + p.Log.Debugf("Number of selected metric nodes: %d", len(selectedNodes)) + + for _, selected := range selectedNodes { + m, err := p.parseQuery(t, doc, selected, config) + if err != nil { + return metrics, err + } + + metrics = append(metrics, m) + } + } + + return metrics, nil +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + t := time.Now() + + switch len(p.Configs) { + case 0: + return nil, nil + case 1: + config := p.Configs[0] + + doc, err := p.document.Parse([]byte(line)) + if err != nil { + return nil, err + } + + selected := doc + if len(config.Selection) > 0 { + selectedNodes, err := p.document.QueryAll(doc, config.Selection) + if err != nil { + return nil, err + } + if len(selectedNodes) < 1 || selectedNodes[0] == nil { + p.debugEmptyQuery("metric selection", doc, config.Selection) + return nil, fmt.Errorf("cannot parse line with empty selection") + } else if len(selectedNodes) != 1 { + return nil, fmt.Errorf("cannot parse line with multiple selected nodes (%d)", len(selectedNodes)) + } + selected = selectedNodes[0] + } + + return p.parseQuery(t, doc, selected, config) + } + return nil, fmt.Errorf("cannot parse line with multiple (%d) configurations", len(p.Configs)) +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config Config) (telegraf.Metric, error) { + var timestamp time.Time + var metricname string + + // Determine the metric name. If a query was specified, use the result of this query and the default metric name + // otherwise. + metricname = config.MetricDefaultName + if len(config.MetricQuery) > 0 { + v, err := p.executeQuery(doc, selected, config.MetricQuery) + if err != nil { + return nil, fmt.Errorf("failed to query metric name: %v", err) + } + var ok bool + if metricname, ok = v.(string); !ok { + if v == nil { + p.Log.Infof("Hint: Empty metric-name-node. If you wanted to set a constant please use `metric_name = \"'name'\"`.") + } + return nil, fmt.Errorf("failed to query metric name: query result is of type %T not 'string'", v) + } + } + + // By default take the time the parser was invoked and override the value + // with the queried timestamp if an expresion was specified. 
+	timestamp = starttime
+	if len(config.Timestamp) > 0 {
+		v, err := p.executeQuery(doc, selected, config.Timestamp)
+		if err != nil {
+			return nil, fmt.Errorf("failed to query timestamp: %v", err)
+		}
+		switch v := v.(type) {
+		case string:
+			// Parse the string with the given format or assume the string to contain
+			// a unix timestamp in seconds if no format is given.
+			if len(config.TimestampFmt) < 1 || strings.HasPrefix(config.TimestampFmt, "unix") {
+				var nanoseconds int64
+
+				t, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse unix timestamp: %v", err)
+				}
+
+				switch config.TimestampFmt {
+				case "unix_ns":
+					nanoseconds = int64(t)
+				case "unix_us":
+					nanoseconds = int64(t * 1e3)
+				case "unix_ms":
+					nanoseconds = int64(t * 1e6)
+				default:
+					nanoseconds = int64(t * 1e9)
+				}
+				timestamp = time.Unix(0, nanoseconds)
+			} else {
+				timestamp, err = time.Parse(config.TimestampFmt, v)
+				if err != nil {
+					return nil, fmt.Errorf("failed to query timestamp format: %v", err)
+				}
+			}
+		case float64:
+			// Assume the value to contain a timestamp in seconds and fractions thereof.
+			timestamp = time.Unix(0, int64(v*1e9))
+		case nil:
+			// No timestamp found. Just ignore the time and use "starttime"
+		default:
+			return nil, fmt.Errorf("unknown format '%T' for timestamp query '%v'", v, config.Timestamp)
+		}
+	}
+
+	// Query tags and add default ones
+	tags := make(map[string]string)
+	for name, query := range config.Tags {
+		// Execute the query and cast the returned values into strings
+		v, err := p.executeQuery(doc, selected, query)
+		if err != nil {
+			return nil, fmt.Errorf("failed to query tag '%s': %v", name, err)
+		}
+		switch v := v.(type) {
+		case string:
+			tags[name] = v
+		case bool:
+			tags[name] = strconv.FormatBool(v)
+		case float64:
+			tags[name] = strconv.FormatFloat(v, 'G', -1, 64)
+		case nil:
+			continue
+		default:
+			return nil, fmt.Errorf("unknown format '%T' for tag '%s'", v, name)
+		}
+	}
+	for name, v := range p.DefaultTags {
+		tags[name] = v
+	}
+
+	// Query fields
+	fields := make(map[string]interface{})
+	for name, query := range config.FieldsInt {
+		// Execute the query and cast the returned values into integers
+		v, err := p.executeQuery(doc, selected, query)
+		if err != nil {
+			return nil, fmt.Errorf("failed to query field (int) '%s': %v", name, err)
+		}
+		switch v := v.(type) {
+		case string:
+			fields[name], err = strconv.ParseInt(v, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse field (int) '%s': %v", name, err)
+			}
+		case bool:
+			fields[name] = int64(0)
+			if v {
+				fields[name] = int64(1)
+			}
+		case float64:
+			fields[name] = int64(v)
+		case nil:
+			continue
+		default:
+			return nil, fmt.Errorf("unknown format '%T' for field (int) '%s'", v, name)
+		}
+	}
+
+	for name, query := range config.Fields {
+		// Execute the query and store the result in fields
+		v, err := p.executeQuery(doc, selected, query)
+		if err != nil {
+			return nil, fmt.Errorf("failed to query field '%s': %v", name, err)
+		}
+		fields[name] = v
+	}
+
+	// Handle the field batch definitions if any.
+	if len(config.FieldSelection) > 0 {
+		fieldnamequery := "name()"
+		fieldvaluequery := "."
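+		// Defaults: without explicit "field_name"/"field_value" queries the
+		// field name is the selected node's own name (XPath "name()") and the
+		// value is its string content (".").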
+ if len(config.FieldNameQuery) > 0 { + fieldnamequery = config.FieldNameQuery + } + if len(config.FieldValueQuery) > 0 { + fieldvaluequery = config.FieldValueQuery + } + + // Query all fields + selectedFieldNodes, err := p.document.QueryAll(selected, config.FieldSelection) + if err != nil { + return nil, err + } + p.Log.Debugf("Number of selected field nodes: %d", len(selectedFieldNodes)) + if len(selectedFieldNodes) > 0 && selectedFieldNodes[0] != nil { + for _, selectedfield := range selectedFieldNodes { + n, err := p.executeQuery(doc, selectedfield, fieldnamequery) + if err != nil { + return nil, fmt.Errorf("failed to query field name with query '%s': %v", fieldnamequery, err) + } + name, ok := n.(string) + if !ok { + return nil, fmt.Errorf("failed to query field name with query '%s': result is not a string (%v)", fieldnamequery, n) + } + v, err := p.executeQuery(doc, selectedfield, fieldvaluequery) + if err != nil { + return nil, fmt.Errorf("failed to query field value for '%s': %v", name, err) + } + path := name + if config.FieldNameExpand { + p := p.document.GetNodePath(selectedfield, selected, "_") + if len(p) > 0 { + path = p + "_" + name + } + } + + // Check if field name already exists and if so, append an index number. + if _, ok := fields[path]; ok { + for i := 1; ; i++ { + p := path + "_" + strconv.Itoa(i) + if _, ok := fields[p]; !ok { + path = p + break + } + } + } + + fields[path] = v + } + } else { + p.debugEmptyQuery("field selection", selected, config.FieldSelection) + } + } + + return metric.New(metricname, tags, fields, timestamp), nil +} + +func (p *Parser) executeQuery(doc, selected dataNode, query string) (r interface{}, err error) { + // Check if the query is relative or absolute and set the root for the query + root := selected + if strings.HasPrefix(query, "/") { + root = doc + } + + // Compile the query + expr, err := path.Compile(query) + if err != nil { + return nil, fmt.Errorf("failed to compile query '%s': %v", query, err) + } + + // Evaluate the compiled expression and handle returned node-iterators + // separately. Those iterators will be returned for queries directly + // referencing a node (value or attribute). + n := expr.Evaluate(p.document.CreateXPathNavigator(root)) + if iter, ok := n.(*path.NodeIterator); ok { + // We got an iterator, so take the first match and get the referenced + // property. This will always be a string. + if iter.MoveNext() { + r = iter.Current().Value() + } + } else { + r = n + } + + return r, nil +} + +func splitLastPathElement(query string) []string { + // This is a rudimentary xpath-parser that splits the path + // into the last path element and the remaining path-part. + // The last path element is then further splitted into + // parts such as attributes or selectors. Each returned + // element is a full path! + + // Nothing left + if query == "" || query == "/" || query == "//" || query == "." 
{
+		return []string{}
+	}
+
+	separatorIdx := strings.LastIndex(query, "/")
+	if separatorIdx < 0 {
+		query = "./" + query
+		separatorIdx = 1
+	}
+
+	// For double slash we want to split at the first slash
+	if separatorIdx > 0 && query[separatorIdx-1] == byte('/') {
+		separatorIdx--
+	}
+
+	base := query[:separatorIdx]
+	if base == "" {
+		base = "/"
+	}
+
+	elements := make([]string, 1)
+	elements[0] = base
+
+	offset := separatorIdx
+	if i := strings.Index(query[offset:], "::"); i >= 0 {
+		// Check for axis operator
+		offset += i
+		elements = append(elements, query[:offset]+"::*")
+	}
+
+	if i := strings.Index(query[offset:], "["); i >= 0 {
+		// Check for predicates
+		offset += i
+		elements = append(elements, query[:offset])
+	} else if i := strings.Index(query[offset:], "@"); i >= 0 {
+		// Check for attributes
+		offset += i
+		elements = append(elements, query[:offset])
+	}
+
+	return elements
+}
+
+func (p *Parser) debugEmptyQuery(operation string, root dataNode, initialquery string) {
+	if p.Log == nil {
+		return
+	}
+
+	query := initialquery
+
+	// We already know the initial query returned no results, so report that
+	// first and then probe successively shorter prefixes of the query.
+	p.Log.Debugf("got 0 nodes for query %q in %s", query, operation)
+	for {
+		parts := splitLastPathElement(query)
+		if len(parts) < 1 {
+			return
+		}
+		for i := len(parts) - 1; i >= 0; i-- {
+			q := parts[i]
+			nodes, err := p.document.QueryAll(root, q)
+			if err != nil {
+				p.Log.Debugf("executing query %q in %s failed: %v", q, operation, err)
+				return
+			}
+			p.Log.Debugf("got %d nodes for query %q in %s", len(nodes), q, operation)
+			if len(nodes) > 0 && nodes[0] != nil {
+				return
+			}
+			query = parts[0]
+		}
+	}
+}
diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go
new file mode 100644
index 0000000000000..ead02e0392769
--- /dev/null
+++ b/plugins/parsers/xpath/parser_test.go
@@ -0,0 +1,1284 @@
+package xpath
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/parsers/influx"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/influxdata/toml"
+
+	"github.com/stretchr/testify/require"
+)
+
+const invalidXML = `<?xml version="1.0"?>
+<Device>
+	<Name>This one has to fail due to missing end-tag</Name>
+`
+
+const singleMetricValuesXML = `<?xml version="1.0"?>
+<Device_1>
+	<Name>Device TestDevice1</Name>
+	<State>ok</State>
+	<Timestamp_unix>1577923199</Timestamp_unix>
+	<Timestamp_unix_ms>1577923199128</Timestamp_unix_ms>
+	<Timestamp_unix_us>1577923199128256</Timestamp_unix_us>
+	<Timestamp_unix_ns>1577923199128256512</Timestamp_unix_ns>
+	<Timestamp_iso>2020-01-01T23:59:59Z</Timestamp_iso>
+	<value_int>98247</value_int>
+	<value_float>98695.81</value_float>
+	<value_bool>true</value_bool>
+	<value_string>this is a test</value_string>
+	<value_position>42;23</value_position>
+</Device_1>
+`
+const singleMetricAttributesXML = `<?xml version="1.0"?>
+<Device_1>
+	<Name value="Device TestDevice1"/>
+	<State _="ok"/>
+	<Timestamp_unix value="1577923199"/>
+	<Timestamp_iso value="2020-01-01T23:59:59Z"/>
+	<attr_int _="12345"/>
+	<attr_float _="12345.678"/>
+	<attr_bool _="true"/>
+	<attr_bool_numeric _="1"/>
+	<attr_string _="this is a test"/>
+</Device_1>
+`
+const singleMetricMultiValuesXML = `<?xml version="1.0"?>
+<Timestamp value="1577923199"/>
+<Device>
+	<Value>1</Value>
+	<Value>2</Value>
+	<Value>3</Value>
+	<Value>4</Value>
+	<Value>5</Value>
+	<Value>6</Value>
+</Device>
+`
+const multipleNodesXML = `<?xml version="1.0"?>
+<Timestamp value="1577923199"/>
+<Device name="Device 1">
+	<Value mode="0">42.0</Value>
+	<Active>1</Active>
+	<State>ok</State>
+</Device>
+<Device name="Device 2">
+	<Value mode="1">42.1</Value>
+	<Active>0</Active>
+	<State>ok</State>
+</Device>
+<Device name="Device 3">
+	<Value mode="2">42.2</Value>
+	<Active>1</Active>
+	<State>ok</State>
+</Device>
+<Device name="Device 4">
+	<Value mode="3">42.3</Value>
+	<Active>0</Active>
+	<State>failed</State>
+</Device>
+<Device name="Device 5">
+	<Value mode="4">42.4</Value>
+	<Active>1</Active>
+	<State>failed</State>
+</Device>
+`
+
+const metricNameQueryXML = `<?xml version="1.0"?>
+<Device_1>
+	<Metric state="ok"/>
+	<Timestamp_unix>1577923199</Timestamp_unix>
+</Device_1>
+`
+
+func TestParseInvalidXML(t *testing.T) {
+	var tests = []struct {
+		name          string
+		input         string
+		configs       []Config
+		defaultTags   map[string]string
+		expectedError string
+	}{
+		{
+			name:  "invalid XML (missing close tag)",
+			input: invalidXML,
+			configs: []Config{
+				{
+					MetricQuery: "test",
+					Timestamp:   "/Device_1/Timestamp_unix",
+				},
+			},
+			defaultTags:   map[string]string{},
+			expectedError: "XML syntax error on line 4: unexpected EOF",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}}
+			require.NoError(t, parser.Init())
+
+			_, err := parser.ParseLine(tt.input)
+			require.Error(t, err)
+			require.Equal(t, tt.expectedError, err.Error())
+		})
+	}
+}
+
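+// Illustrative example (not part of the original change-set): it sketches how
+// splitLastPathElement decomposes a failing query into the prefixes that
+// debugEmptyQuery probes one by one.
+func TestSplitLastPathElementExample(t *testing.T) {
+	// The axis operator is detected and additionally reported as a wildcard.
+	parts := splitLastPathElement("/Device/child::NonExisting")
+	require.Equal(t, []string{"/Device", "/Device/child::*"}, parts)
+}
+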
+func TestInvalidTypeQueriesFail(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expectedError string + }{ + { + name: "invalid field (int) type", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + FieldsInt: map[string]string{ + "a": "/Device_1/value_string", + }, + }, + }, + defaultTags: map[string]string{}, + expectedError: "failed to parse field (int) 'a': strconv.ParseInt: parsing \"this is a test\": invalid syntax", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expectedError, err.Error()) + }) + } +} + +func TestInvalidTypeQueries(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "invalid field type (number)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "number(/Device_1/value_string)", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": float64(0), + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "invalid field type (boolean)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "boolean(/Device_1/value_string)", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": true, + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseTimestamps(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse timestamp (no fmt)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse timestamp (unix)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + TimestampFmt: "unix", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse timestamp (unix_ms)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ms", + TimestampFmt: "unix_ms", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + 
map[string]string{}, + map[string]interface{}{}, + time.Unix(0, int64(1577923199128*1e6)), + ), + }, + { + name: "parse timestamp (unix_us)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_us", + TimestampFmt: "unix_us", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, int64(1577923199128256*1e3)), + ), + }, + { + name: "parse timestamp (unix_us)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ns", + TimestampFmt: "unix_ns", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, int64(1577923199128256512)), + ), + }, + { + name: "parse timestamp (RFC3339)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso", + TimestampFmt: "2006-01-02T15:04:05Z", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseSingleValues(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse scalar values as string fields", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "/Device_1/value_int", + "b": "/Device_1/value_float", + "c": "/Device_1/value_bool", + "d": "/Device_1/value_string", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": "98247", + "b": "98695.81", + "c": "true", + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse scalar values as typed fields (w/o int)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "number(Device_1/value_int)", + "b": "number(/Device_1/value_float)", + "c": "boolean(/Device_1/value_bool)", + "d": "/Device_1/value_string", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 98247.0, + "b": 98695.81, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse values as typed fields (w/ int)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "b": "number(/Device_1/value_float)", + "c": "boolean(/Device_1/value_bool)", + "d": "/Device_1/value_string", + }, + FieldsInt: map[string]string{ + "a": "/Device_1/value_int", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + 
map[string]interface{}{ + "a": 98247, + "b": 98695.81, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse substring values", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "x": "substring-before(/Device_1/value_position, ';')", + "y": "substring-after(/Device_1/value_position, ';')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "x": "42", + "y": "23", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse substring values (typed)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "x": "number(substring-before(/Device_1/value_position, ';'))", + "y": "number(substring-after(/Device_1/value_position, ';'))", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "x": 42.0, + "y": 23.0, + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse substring values (typed int)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + FieldsInt: map[string]string{ + "x": "substring-before(/Device_1/value_position, ';')", + "y": "substring-after(/Device_1/value_position, ';')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "x": 42, + "y": 23, + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse tags", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Tags: map[string]string{ + "state": "/Device_1/State", + "name": "substring-after(/Device_1/Name, ' ')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{ + "state": "ok", + "name": "TestDevice1", + }, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseSingleAttributes(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse attr timestamp (unix)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr timestamp (RFC3339)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso/@value", + TimestampFmt: "2006-01-02T15:04:05Z", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr 
as string fields", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "a": "/Device_1/attr_int/@_", + "b": "/Device_1/attr_float/@_", + "c": "/Device_1/attr_bool/@_", + "d": "/Device_1/attr_string/@_", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": "12345", + "b": "12345.678", + "c": "true", + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr as typed fields (w/o int)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "a": "number(/Device_1/attr_int/@_)", + "b": "number(/Device_1/attr_float/@_)", + "c": "boolean(/Device_1/attr_bool/@_)", + "d": "/Device_1/attr_string/@_", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 12345.0, + "b": 12345.678, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr as typed fields (w/ int)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "b": "number(/Device_1/attr_float/@_)", + "c": "boolean(/Device_1/attr_bool/@_)", + "d": "/Device_1/attr_string/@_", + }, + FieldsInt: map[string]string{ + "a": "/Device_1/attr_int/@_", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 12345, + "b": 12345.678, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr substring", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "name": "substring-after(/Device_1/Name/@value, ' ')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "name": "TestDevice1", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr tags", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Tags: map[string]string{ + "state": "/Device_1/State/@_", + "name": "substring-after(/Device_1/Name/@value, ' ')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{ + "state": "ok", + "name": "TestDevice1", + }, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr bool", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "a": "/Device_1/attr_bool_numeric/@_ = 1", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": true, + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := 
parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseMultiValues(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "select values (float)", + input: singleMetricMultiValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", + Fields: map[string]string{ + "a": "number(/Device/Value[1])", + "b": "number(/Device/Value[2])", + "c": "number(/Device/Value[3])", + "d": "number(/Device/Value[4])", + "e": "number(/Device/Value[5])", + "f": "number(/Device/Value[6])", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 1.0, + "b": 2.0, + "c": 3.0, + "d": 4.0, + "e": 5.0, + "f": 6.0, + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "select values (int)", + input: singleMetricMultiValuesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", + FieldsInt: map[string]string{ + "a": "/Device/Value[1]", + "b": "/Device/Value[2]", + "c": "/Device/Value[3]", + "d": "/Device/Value[4]", + "e": "/Device/Value[5]", + "f": "/Device/Value[6]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 1, + "b": 2, + "c": 3, + "d": 4, + "e": 5, + "f": 6, + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseMultiNodes(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected []telegraf.Metric + }{ + { + name: "select all devices", + input: multipleNodesXML, + configs: []Config{ + { + MetricDefaultName: "test", + Selection: "/Device", + Timestamp: "/Timestamp/@value", + Fields: map[string]string{ + "value": "number(Value)", + "active": "Active = 1", + }, + FieldsInt: map[string]string{ + "mode": "Value/@mode", + }, + Tags: map[string]string{ + "name": "@name", + "state": "State", + }, + }, + }, + defaultTags: map[string]string{}, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 1", + "state": "ok", + }, + map[string]interface{}{ + "value": 42.0, + "active": true, + "mode": 0, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 2", + "state": "ok", + }, + map[string]interface{}{ + "value": 42.1, + "active": false, + "mode": 1, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 3", + "state": "ok", + }, + map[string]interface{}{ + "value": 42.2, + "active": true, + "mode": 2, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 4", + "state": "failed", + }, + map[string]interface{}{ + "value": 42.3, + "active": false, + "mode": 3, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 5", + "state": "failed", + }, + map[string]interface{}{ + 
"value": 42.4, + "active": true, + "mode": 4, + }, + time.Unix(1577923199, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := parser.Parse([]byte(tt.input)) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + +func TestParseMetricQuery(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse metric name query", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "name(/Device_1/Metric/@*[1])", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "state", + map[string]string{}, + map[string]interface{}{ + "value": "ok", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse metric name constant", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "'the_metric'", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "the_metric", + map[string]string{}, + map[string]interface{}{ + "value": "ok", + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + expected string + }{ + { + name: "string metric name query", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "arbitrary", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + expected: "failed to query metric name: query result is of type not 'string'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expected, err.Error()) + }) + } +} + +func TestEmptySelection(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + }{ + { + name: "empty path", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "/Device/NonExisting", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + { + name: "empty pattern", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "//NonExisting", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + { + name: "empty axis", + input: multipleNodesXML, + configs: []Config{ + { + Selection: 
"/Device/child::NonExisting", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + { + name: "empty predicate", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "/Device[@NonExisting=true]", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + _, err := parser.Parse([]byte(tt.input)) + require.Error(t, err) + require.Equal(t, err.Error(), "cannot parse with empty selection node") + }) + } +} + +func TestTestCases(t *testing.T) { + var tests = []struct { + name string + filename string + }{ + { + name: "explicit basic", + filename: "testcases/multisensor_explicit_basic.conf", + }, + { + name: "explicit batch", + filename: "testcases/multisensor_explicit_batch.conf", + }, + { + name: "field selection batch", + filename: "testcases/multisensor_selection_batch.conf", + }, + { + name: "earthquakes quakeml", + filename: "testcases/earthquakes.conf", + }, + { + name: "openweathermap forecast (xml)", + filename: "testcases/openweathermap_xml.conf", + }, + { + name: "openweathermap forecast (json)", + filename: "testcases/openweathermap_json.conf", + }, + { + name: "addressbook tutorial (protobuf)", + filename: "testcases/addressbook.conf", + }, + { + name: "message-pack", + filename: "testcases/tracker_msgpack.conf", + }, + } + + parser := influx.NewParser(influx.NewMetricHandler()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filename := filepath.FromSlash(tt.filename) + cfg, header, err := loadTestConfiguration(filename) + require.NoError(t, err) + cfg.MetricDefaultName = "xml" + + // Load the xml-content + input, err := testutil.ParseRawLinesFrom(header, "File:") + require.NoError(t, err) + require.Len(t, input, 1) + + filefields := strings.Fields(input[0]) + require.GreaterOrEqual(t, len(filefields), 1) + datafile := filepath.FromSlash(filefields[0]) + fileformat := "" + if len(filefields) > 1 { + fileformat = filefields[1] + } + + // Load the protocol buffer information if required + var pbmsgdef, pbmsgtype string + if fileformat == "xpath_protobuf" { + input, err := testutil.ParseRawLinesFrom(header, "Protobuf:") + require.NoError(t, err) + require.Len(t, input, 1) + + protofields := strings.Fields(input[0]) + require.Len(t, protofields, 2) + pbmsgdef = protofields[0] + pbmsgtype = protofields[1] + } + + content, err := os.ReadFile(datafile) + require.NoError(t, err) + + // Get the expectations + expectedOutputs, err := testutil.ParseMetricsFrom(header, "Expected Output:", parser) + require.NoError(t, err) + + expectedErrors, _ := testutil.ParseRawLinesFrom(header, "Expected Error:") + + // Setup the parser and run it. + parser := &Parser{ + Format: fileformat, + ProtobufMessageDef: pbmsgdef, + ProtobufMessageType: pbmsgtype, + Configs: []Config{*cfg}, + Log: testutil.Logger{Name: "parsers.xml"}, + } + require.NoError(t, parser.Init()) + outputs, err := parser.Parse(content) + if len(expectedErrors) == 0 { + require.NoError(t, err) + } + // If no timestamp is given we cannot test it. 
So use the one of the output + if cfg.Timestamp == "" { + testutil.RequireMetricsEqual(t, expectedOutputs, outputs, testutil.IgnoreTime()) + } else { + testutil.RequireMetricsEqual(t, expectedOutputs, outputs) + } + }) + } +} + +func loadTestConfiguration(filename string) (*Config, []string, error) { + buf, err := os.ReadFile(filename) + if err != nil { + return nil, nil, err + } + + header := make([]string, 0) + for _, line := range strings.Split(string(buf), "\n") { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "#") { + header = append(header, line) + } + } + cfg := Config{} + err = toml.Unmarshal(buf, &cfg) + return &cfg, header, err +} diff --git a/plugins/parsers/xpath/protocolbuffer_document.go b/plugins/parsers/xpath/protocolbuffer_document.go new file mode 100644 index 0000000000000..4ae88812d96bb --- /dev/null +++ b/plugins/parsers/xpath/protocolbuffer_document.go @@ -0,0 +1,161 @@ +package xpath + +import ( + "fmt" + "sort" + "strings" + + "github.com/influxdata/telegraf" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/jhump/protoreflect/desc/protoparse" + + path "github.com/antchfx/xpath" + "github.com/doclambda/protobufquery" +) + +type protobufDocument struct { + MessageDefinition string + MessageType string + Log telegraf.Logger + msg *dynamicpb.Message +} + +func (d *protobufDocument) Init() error { + // Check the message definition and type + if d.MessageDefinition == "" { + return fmt.Errorf("protocol-buffer message-definition not set") + } + if d.MessageType == "" { + return fmt.Errorf("protocol-buffer message-type not set") + } + + // Load the file descriptors from the given protocol-buffer definition + parser := protoparse.Parser{} + fds, err := parser.ParseFiles(d.MessageDefinition) + if err != nil { + return fmt.Errorf("parsing protocol-buffer definition in %q failed: %v", d.MessageDefinition, err) + } + if len(fds) < 1 { + return fmt.Errorf("file %q does not contain file descriptors", d.MessageDefinition) + } + + // Register all definitions in the file in the global registry + for _, fd := range fds { + if fd == nil { + continue + } + fileDescProto := fd.AsFileDescriptorProto() + fileDesc, err := protodesc.NewFile(fileDescProto, nil) + if err != nil { + return fmt.Errorf("creating file descriptor from proto failed: %v", err) + } + if err := protoregistry.GlobalFiles.RegisterFile(fileDesc); err != nil { + return fmt.Errorf("registering file descriptor %q failed: %v", fileDesc.Package(), err) + } + } + + // Lookup given type in the loaded file descriptors + msgFullName := protoreflect.FullName(d.MessageType) + desc, err := protoregistry.GlobalFiles.FindDescriptorByName(msgFullName) + if err != nil { + d.Log.Infof("Could not find %q... 
Known messages:", msgFullName) + + var known []string + protoregistry.GlobalFiles.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + name := strings.TrimSpace(string(fd.FullName())) + if name != "" { + known = append(known, name) + } + return true + }) + sort.Strings(known) + for _, name := range known { + d.Log.Infof(" %s", name) + } + return err + } + + // Get a prototypical message for later use + msgDesc, ok := desc.(protoreflect.MessageDescriptor) + if !ok { + return fmt.Errorf("%q is not a message descriptor (%T)", msgFullName, desc) + } + + d.msg = dynamicpb.NewMessage(msgDesc) + if d.msg == nil { + return fmt.Errorf("creating message template for %q failed", msgDesc.FullName()) + } + + return nil +} + +func (d *protobufDocument) Parse(buf []byte) (dataNode, error) { + msg := d.msg.New() + + // Unmarshal the received buffer + if err := proto.Unmarshal(buf, msg.Interface()); err != nil { + return nil, err + } + + return protobufquery.Parse(msg) +} + +func (d *protobufDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + // If this panics it's a programming error as we changed the document type while processing + native, err := protobufquery.QueryAll(node.(*protobufquery.Node), expr) + if err != nil { + return nil, err + } + + nodes := make([]dataNode, len(native)) + for i, n := range native { + nodes[i] = n + } + return nodes, nil +} + +func (d *protobufDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + // If this panics it's a programming error as we changed the document type while processing + return protobufquery.CreateXPathNavigator(node.(*protobufquery.Node)) +} + +func (d *protobufDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + names := make([]string, 0) + + // If these panic it's a programming error as we changed the document type while processing + nativeNode := node.(*protobufquery.Node) + nativeRelativeTo := relativeTo.(*protobufquery.Node) + + // Climb up the tree and collect the node names + n := nativeNode.Parent + for n != nil && n != nativeRelativeTo { + names = append(names, n.Name) + n = n.Parent + } + + if len(names) < 1 { + return "" + } + + // Construct the nodes + nodepath := "" + for _, name := range names { + nodepath = name + sep + nodepath + } + + return nodepath[:len(nodepath)-1] +} + +func (d *protobufDocument) OutputXML(node dataNode) string { + native := node.(*protobufquery.Node) + return native.OutputXML() +} + +func init() { +} diff --git a/plugins/parsers/xpath/testcases/addressbook.conf b/plugins/parsers/xpath/testcases/addressbook.conf new file mode 100644 index 0000000000000..eeca8921d7b16 --- /dev/null +++ b/plugins/parsers/xpath/testcases/addressbook.conf @@ -0,0 +1,28 @@ +# Example for parsing an example protocol buffer data. 
+# +# File: +# testcases/addressbook.dat xpath_protobuf +# +# Protobuf: +# testcases/addressbook.proto addressbook.AddressBook +# +# Expected Output: +# addresses,id=101,name=John\ Doe age=42i,email="john@example.com" 1621430181000000000 +# addresses,id=102,name=Jane\ Doe age=40i 1621430181000000000 +# addresses,id=201,name=Jack\ Doe age=12i,email="jack@example.com" 1621430181000000000 +# addresses,id=301,name=Jack\ Buck age=19i,email="buck@example.com" 1621430181000000000 +# addresses,id=1001,name=Janet\ Doe age=16i,email="janet@example.com" 1621430181000000000 +# + +metric_name = "'addresses'" +metric_selection = "//people" + +[tags] + id = "id" + name = "name" + +[fields_int] + age = "age" + +[fields] + email = "email" diff --git a/plugins/parsers/xpath/testcases/addressbook.dat b/plugins/parsers/xpath/testcases/addressbook.dat new file mode 100644 index 0000000000000..a5c1d8feefa70 --- /dev/null +++ b/plugins/parsers/xpath/testcases/addressbook.dat @@ -0,0 +1,17 @@ + + +John Doeejohn@example.com * + +Jane Doef ( +3 +Jack DoeÉjack@example.com * + 555-555-5555 +V + Jack Buck­buck@example.com * + 555-555-0000* + 555-555-0001* + 555-555-0002 +E + Janet Doeéjanet@example.com * + 555-777-0000* + 555-777-0001homeprivatefriends \ No newline at end of file diff --git a/plugins/parsers/xpath/testcases/addressbook.proto b/plugins/parsers/xpath/testcases/addressbook.proto new file mode 100644 index 0000000000000..3ed0eb566a987 --- /dev/null +++ b/plugins/parsers/xpath/testcases/addressbook.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package addressbook; + +message Person { + string name = 1; + int32 id = 2; // Unique ID number for this person. + string email = 3; + uint32 age = 4; + + enum PhoneType { + MOBILE = 0; + HOME = 1; + WORK = 2; + } + + message PhoneNumber { + string number = 1; + PhoneType type = 2; + } + + repeated PhoneNumber phones = 5; +} + +message AddressBook { + repeated Person people = 1; + repeated string tags = 2; +} diff --git a/plugins/parsers/xpath/testcases/earthquakes.conf b/plugins/parsers/xpath/testcases/earthquakes.conf new file mode 100644 index 0000000000000..8f02f4384721f --- /dev/null +++ b/plugins/parsers/xpath/testcases/earthquakes.conf @@ -0,0 +1,44 @@ +# Example for parsing QuakeML measurement data. 
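+# Note that namespaced attributes such as "@catalog:eventid" (used below) are
+# queried with their namespace prefix written verbatim.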
+# +# File: +# testcases/earthquakes.quakeml +# +# Expected Output: +# earthquakes,agency=us,type=mww depth=13000,eventid="7000dg8x",lat=-37.6099,lon=179.6102,mag=6.3,station_count=33i 1614989782185000000 +# earthquakes,agency=us,type=mww depth=17000,eventid="7000dft1",lat=-28.7146,lon=-176.5582,mag=6.3,station_count=15i 1614911436571000000 +# earthquakes,agency=us,type=mww depth=26450,eventid="7000dflf",lat=-29.7347,lon=-177.2817,mag=8.1,station_count=81i 1614886112819000000 +# earthquakes,agency=us,type=mb depth=10000,eventid="7000dfku",lat=39.7886,lon=22.1189,mag=5.8,station_count=279i 1614883099415000000 +# earthquakes,agency=us,type=mww depth=53090,eventid="7000dfk3",lat=-29.6647,lon=-177.8343,mag=7.4,station_count=40i 1614879684425000000 +# earthquakes,agency=us,type=mww depth=20780,eventid="7000dffl",lat=-37.5628,lon=179.4443,mag=7.3,station_count=33i 1614864456464000000 +# earthquakes,agency=us,type=mww depth=10000,eventid="7000df40",lat=39.7641,lon=22.1756,mag=6.3,station_count=81i 1614766570197000000 +# earthquakes,type=mww depth=42100,eventid="0212o88mof",lat=61.3286,lon=-149.9991,mag=5.3 1614452365398000000 +# earthquakes,agency=us,type=mww depth=10000,eventid="6000dkmk",lat=63.9602,lon=-22.2736,mag=5.6,station_count=64i 1614161159873000000 +# earthquakes,agency=NC,type=mw depth=6220,eventid="73526151",lat=37.0456667,lon=-121.4781667,mag=3.76,station_count=3i 1613957893840000000 +# earthquakes,agency=US,type=mwr depth=7000,eventid="2021dmpg",lat=36.96366667,lon=-98.09383333,mag=4.2,station_count=39i 1613743017950000000 +# earthquakes,agency=us,type=mww depth=5590,eventid="6000dhxn",lat=-17.8192,lon=167.5901,mag=6.2,station_count=24i 1613436564078000000 +# earthquakes,agency=us,type=mww depth=49940,eventid="6000dher",lat=37.7453,lon=141.7494,mag=7.1,station_count=74i 1613225270397000000 +# earthquakes,agency=us,type=mww depth=98950,eventid="6000dh48",lat=38.1314,lon=73.545,mag=5.9,station_count=34i 1613149295308000000 +# earthquakes,agency=us,type=mww depth=10000,eventid="6000dg77",lat=-23.0508,lon=171.657,mag=7.7,station_count=54i 1612963195532000000 +# + +metric_selection = "//event" +metric_name = "string('earthquakes')" + +# Convert from milliseconds to nanoseconds as golang unfortunately +# only supports RFC3339 with second OR nanosecond precision. 
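+# For example, "2021-03-05T18:08:31.040Z" becomes
+# "2021-03-05T18:08:31.040000000Z" before being parsed with the format below.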
+timestamp = "replace(normalize-space(origin/time), 'Z', '000000Z')" +timestamp_format = "2006-01-02T15:04:05.999999999Z" + +[fields] + eventid = "@catalog:eventid" + lon = "number(origin/longitude/value)" + lat = "number(origin/latitude/value)" + depth = "number(origin/depth/value)" + mag = "number(magnitude/mag/value)" + +[fields_int] + station_count = "magnitude/stationCount" + +[tags] + agency = "magnitude/creationInfo/agencyID" + type = "magnitude/type" diff --git a/plugins/parsers/xpath/testcases/earthquakes.quakeml b/plugins/parsers/xpath/testcases/earthquakes.quakeml new file mode 100644 index 0000000000000..fa5f5fb7f63bd --- /dev/null +++ b/plugins/parsers/xpath/testcases/earthquakes.quakeml @@ -0,0 +1,20 @@ + + + +earthquake name182 km NE of Gisborne, New Zealand179.6102-37.60991300017008100horizontal uncertainty2901.04341.036manualus2021-03-09T03:01:59.040Z6.30.054mww33quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dg8x/us/1615258919040/product.xmlmanualus2021-03-09T03:01:59.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dg8x/us/1615258919040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dg8x/us/1615258919040/product.xml#magnitudeearthquakeus2021-03-09T03:05:51.084Z +earthquake nameKermadec Islands region-176.5582-28.71461700018009800horizontal uncertainty891.25419.815manualus2021-03-05T18:47:44.040Z6.30.08mww15quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dft1/us/1614970064040/product.xmlmanualus2021-03-05T18:47:44.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dft1/us/1614970064040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dft1/us/1614970064040/product.xml#magnitudeearthquakeus2021-03-06T02:34:07.561Z +earthquake nameKermadec Islands, New Zealand-177.2817-29.73472645037007800horizontal uncertainty1300.67210.746manualus2021-03-05T18:08:31.040Z8.10.034mww81quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dflf/us/1614967711040/product.xmlmanualus2021-03-05T18:08:31.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dflf/us/1614967711040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dflf/us/1614967711040/product.xml#magnitudeearthquakeus2021-03-09T18:52:08.298Z +earthquake nameGreece22.118939.78861000018005200horizontal uncertainty1400.9190.424manualus2021-03-05T15:03:03.040Z5.80.036mb279quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfku/us/1614956583040/product.xmlmanualus2021-03-05T15:03:03.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfku/us/1614956583040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfku/us/1614956583040/product.xml#magnitudeearthquakeus2021-03-07T08:43:06.987Z +earthquake nameKermadec Islands, New Zealand-177.8343-29.66475309036007800horizontal uncertainty1321.14300.426manualus2021-03-05T13:49:34.040Z7.40.049mww40quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfk3/us/1614952174040/product.xmlmanualus2021-03-05T13:49:34.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfk3/us/1614952174040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfk3/us/1614952174040/product.xml#magnitudeearthquakeus2021-03-09T18:42:04.756Z +earthquake name174 km NE of Gisborne, New Zealand179.4443-37.56282078032006600horizontal 
uncertainty1411.35230.904manualus2021-03-04T15:08:47.040Z7.30.054mww33quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dffl/us/1614870527040/product.xmlmanualus2021-03-04T15:08:47.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dffl/us/1614870527040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dffl/us/1614870527040/product.xml#magnitudeearthquakeus2021-03-10T21:54:32.975Z +earthquake name10 km WNW of Týrnavos, Greece22.175639.76411000018005400horizontal uncertainty1291.05170.415manualus2021-03-03T10:31:58.040Z6.30.034mww81quakeml:earthquake.usgs.gov/realtime/product/origin/us7000df40/us/1614767518040/product.xmlmanualus2021-03-03T10:31:58.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000df40/us/1614767518040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000df40/us/1614767518040/product.xml#magnitudeearthquakeus2021-03-08T04:19:29.249Z +earthquake name3 km SSW of Point MacKenzie, Alaska-149.999161.3286421003000horizontal uncertainty1340.86manual2021-02-27T19:20:59.442Z25.3mwwquakeml:earthquake.usgs.gov/realtime/product/origin/AK0212o88mof/ak/1614453659442/product.xmlmanual2021-02-27T19:20:59.442Zquakeml:earthquake.usgs.gov/realtime/product/origin/AK0212o88mof/ak/1614453659442/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/AK0212o88mof/ak/1614453659442/product.xml#magnitudeearthquakeak2021-03-10T19:09:33.840Z2 +earthquake name5 km ESE of Vogar, Iceland-22.273663.96021000018005600horizontal uncertainty1291.22460.891manualus2021-02-24T15:05:24.040Z5.60.039mww64quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dkmk/us/1614179124040/product.xmlmanualus2021-02-24T15:05:24.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dkmk/us/1614179124040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dkmk/us/1614179124040/product.xml#magnitudeearthquakeus2021-03-07T02:32:18.760Z +earthquake name9km ENE of Gilroy, CA-121.478166737.0456667622024090horizontal uncertainty1781640.15330.02089manualNC2021-02-23T00:54:06.560Z103.76mw3quakeml:earthquake.usgs.gov/realtime/product/origin/nc73526151/nc/1614041646560/product.xmlmanualNC2021-02-23T00:54:06.560Zquakeml:earthquake.usgs.gov/realtime/product/origin/nc73526151/nc/1614041646560/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/nc73526151/nc/1614041646560/product.xml#magnitudeearthquakenc2021-03-04T06:33:36.782Z10 +earthquake name6 km SW of Manchester, Oklahoma-98.0938333336.9636666770003000horizontal uncertainty182980.15960manualOK2021-02-19T14:42:10.861Z4.2mwr39quakeml:earthquake.usgs.gov/realtime/product/origin/ogs2021dmpg/ok/1613745730861/product.xmlmanualUS2021-02-19T14:42:10.861Zquakeml:earthquake.usgs.gov/realtime/product/origin/ogs2021dmpg/ok/1613745730861/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/ogs2021dmpg/ok/1613745730861/product.xml#magnitudeearthquakeok2021-03-05T02:13:24.659Z +earthquake name77 km W of Port-Vila, Vanuatu167.5901-17.8192559033007400horizontal uncertainty3860.86323.666manualus2021-02-19T03:36:41.040Z6.20.063mww24quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dhxn/us/1613705801040/product.xmlmanualus2021-02-19T03:36:41.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dhxn/us/1613705801040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dhxn/us/1613705801040/product.xml#magnitudeearthquakeus2021-03-04T11:07:03.880Z +earthquake name72 km ENE of Namie, Japan141.749437.74534994035007000horizontal 
uncertainty1441.12333.073manualus2021-02-14T22:04:22.040Z7.10.036mww74quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dher/us/1613340262040/product.xmlmanualus2021-02-14T22:04:22.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dher/us/1613340262040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dher/us/1613340262040/product.xml#magnitudeearthquakeus2021-03-05T13:32:14.760Z +earthquake name37 km W of Murghob, Tajikistan73.54538.13149895012005400horizontal uncertainty2980.91161.915manualus2021-02-18T17:53:33.040Z5.90.053mww34quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dh48/us/1613670813040/product.xmlmanualus2021-02-18T17:53:33.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dh48/us/1613670813040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dh48/us/1613670813040/product.xml#magnitudeearthquakeus2021-03-04T10:24:38.562Z +earthquake namesoutheast of the Loyalty Islands171.657-23.05081000018007800horizontal uncertainty2700.42157.988manualus2021-03-08T07:54:50.040Z7.70.042mww54quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dg77/us/1615190090040/product.xmlmanualus2021-03-08T07:54:50.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dg77/us/1615190090040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dg77/us/1615190090040/product.xml#magnitudeearthquakeus2021-03-08T08:07:24.427Z +2021-03-11T11:55:37.000Z + \ No newline at end of file diff --git a/plugins/parsers/xpath/testcases/multisensor.xml b/plugins/parsers/xpath/testcases/multisensor.xml new file mode 100644 index 0000000000000..88cb0eaadc23d --- /dev/null +++ b/plugins/parsers/xpath/testcases/multisensor.xml @@ -0,0 +1,31 @@ + + + Main Gateway + 2020-08-01T15:04:03Z + 12 + ok + + + + + + + + + busy + + + + + + + standby + + + + + + + error + + diff --git a/plugins/parsers/xpath/testcases/multisensor_explicit_basic.conf b/plugins/parsers/xpath/testcases/multisensor_explicit_basic.conf new file mode 100644 index 0000000000000..faf2c85a243a5 --- /dev/null +++ b/plugins/parsers/xpath/testcases/multisensor_explicit_basic.conf @@ -0,0 +1,17 @@ +# Simple example for using the xml-parser. +# +# File: +# testcases/multisensor.xml +# +# Expected Output: +# xml,gateway=Main seqnr=12i,ok=true +# + +[tags] + gateway = "substring-before(/Gateway/Name, ' ')" + +[fields_int] + seqnr = "/Gateway/Sequence" + +[fields] + ok = "/Gateway/Status = 'ok'" diff --git a/plugins/parsers/xpath/testcases/multisensor_explicit_batch.conf b/plugins/parsers/xpath/testcases/multisensor_explicit_batch.conf new file mode 100644 index 0000000000000..354462d1e5693 --- /dev/null +++ b/plugins/parsers/xpath/testcases/multisensor_explicit_batch.conf @@ -0,0 +1,28 @@ +# Example for explicitly selecting fields from a bunch of selected metrics. 
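+# (Each field below is selected explicitly with a typed XPath expression; compare multisensor_selection_batch.conf further down, which discovers the fields via field_selection.)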
+# +# File: +# testcases/multisensor.xml +# +# Expected Output: +# sensors,name=Facility\ A consumers=3i,frequency=49.78,power=123.4,temperature=20,ok=true 1596294243000000000 +# sensors,name=Facility\ B consumers=1i,frequency=49.78,power=14.3,temperature=23.1,ok=true 1596294243000000000 +# sensors,name=Facility\ C consumers=0i,frequency=49.78,power=0.02,temperature=19.7,ok=false 1596294243000000000 +# + +metric_selection = "/Bus/child::Sensor" +metric_name = "string('sensors')" + +timestamp = "/Gateway/Timestamp" +timestamp_format = "2006-01-02T15:04:05Z" + +[tags] + name = "substring-after(@name, ' ')" + +[fields_int] + consumers = "Variable/@consumers" + +[fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'error'" diff --git a/plugins/parsers/xpath/testcases/multisensor_selection_batch.conf b/plugins/parsers/xpath/testcases/multisensor_selection_batch.conf new file mode 100644 index 0000000000000..d9ed1cd89d56e --- /dev/null +++ b/plugins/parsers/xpath/testcases/multisensor_selection_batch.conf @@ -0,0 +1,23 @@ +# Example for batch selecting fields from a bunch of selected metrics. +# +# File: +# testcases/multisensor.xml +# +# Expected Output: +# sensors,name=Facility\ A consumers=3,frequency=49.78,power=123.4,temperature=20 1596294243000000000 +# sensors,name=Facility\ B consumers=1,frequency=49.78,power=14.3,temperature=23.1 1596294243000000000 +# sensors,name=Facility\ C consumers=0,frequency=49.78,power=0.02,temperature=19.7 1596294243000000000 +# + +metric_selection = "/Bus/child::Sensor" +metric_name = "string('sensors')" + +timestamp = "/Gateway/Timestamp" +timestamp_format = "2006-01-02T15:04:05Z" + +field_selection = "child::Variable" +field_name = "name(@*[1])" +field_value = "number(@*[1])" + +[tags] + name = "substring-after(@name, ' ')" diff --git a/plugins/parsers/xpath/testcases/openweathermap_5d.json b/plugins/parsers/xpath/testcases/openweathermap_5d.json new file mode 100644 index 0000000000000..c8e4dccd45890 --- /dev/null +++ b/plugins/parsers/xpath/testcases/openweathermap_5d.json @@ -0,0 +1,127 @@ +{ + "cod": "200", + "message": 0.0179, + "cnt": 96, + "list": [ + { + "dt": 1596632400, + "main": { + "temp": 280.16, + "feels_like": 280.41, + "temp_min": 280.16, + "temp_max": 280.16, + "pressure": 1010, + "sea_level": 1010, + "grnd_level": 1010, + "humidity": 70, + "temp_kf": 0 + }, + "weather": [ + { + "id": 804, + "main": "Clouds", + "description": "overcast clouds", + "icon": "04n" + } + ], + "clouds": { + "all": 100 + }, + "wind": { + "speed": 2.03, + "deg": 252, + "gust":5.46 + }, + "visibility": 10000, + "pop": 0.04, + "sys": { + "pod": "n" + }, + "dt_txt": "2020-08-05 13:00:00" + }, + { + "dt": 159663600, + "main": { + "temp": 281.16, + "feels_like": 281.41, + "temp_min": 281.16, + "temp_max": 281.16, + "pressure": 1011, + "sea_level": 1011, + "grnd_level": 1011, + "humidity": 71, + "temp_kf": 0 + }, + "weather": [ + { + "id": 804, + "main": "Clouds", + "description": "overcast clouds", + "icon": "04n" + } + ], + "clouds": { + "all": 100 + }, + "wind": { + "speed": 2.03, + "deg": 252, + "gust":5.46 + }, + "visibility": 10000, + "pop": 0.04, + "sys": { + "pod": "n" + }, + "dt_txt": "2020-08-05 14:00:00" + }, + { + "dt": 159667200, + "main": { + "temp": 282.16, + "feels_like": 282.41, + "temp_min": 282.16, + "temp_max": 282.16, + "pressure": 1012, + "sea_level": 1012, + "grnd_level": 1012, + "humidity": 71, + "temp_kf": 0 + }, + "weather": [ + { + "id": 804, + 
"main": "Clouds", + "description": "overcast clouds", + "icon": "04n" + } + ], + "clouds": { + "all": 100 + }, + "wind": { + "speed": 2.03, + "deg": 252, + "gust":5.46 + }, + "visibility": 10000, + "pop": 0.04, + "sys": { + "pod": "n" + }, + "dt_txt": "2020-08-05 15:00:00" + } + ], + "city": { + "id": 2643743, + "name": "London", + "coord": { + "lat": 51.5085, + "lon": -0.1258 + }, + "country": "GB", + "timezone": 0, + "sunrise": 1568958164, + "sunset": 1569002733 + } +} diff --git a/plugins/parsers/xpath/testcases/openweathermap_5d.xml b/plugins/parsers/xpath/testcases/openweathermap_5d.xml new file mode 100644 index 0000000000000..2b7dc83a5b86b --- /dev/null +++ b/plugins/parsers/xpath/testcases/openweathermap_5d.xml @@ -0,0 +1,38 @@ + + + + + London + + GB + 3600 + + + + 2015-06-30T00:00:00Z + + + + + + + diff --git a/plugins/parsers/xpath/testcases/openweathermap_json.conf b/plugins/parsers/xpath/testcases/openweathermap_json.conf new file mode 100644 index 0000000000000..d9b3e04b692eb --- /dev/null +++ b/plugins/parsers/xpath/testcases/openweathermap_json.conf @@ -0,0 +1,29 @@ +# Example for parsing openweathermap five-day-forecast data. +# +# File: +# testcases/openweathermap_5d.json xpath_json +# +# Expected Output: +# weather,city=London,country=GB humidity=70i,clouds=100i,wind_direction=252,wind_speed=2.03,temperature=137.86666666666667,precipitation=0 1596632400000000000 +# weather,city=London,country=GB wind_direction=252,wind_speed=2.03,temperature=138.42222222222225,precipitation=0,clouds=100i,humidity=71i 159663600000000000 +# weather,city=London,country=GB humidity=71i,clouds=100i,wind_direction=252,wind_speed=2.03,temperature=138.9777777777778,precipitation=0 159667200000000000 +# + +metric_name = "'weather'" +metric_selection = "//list/*" +timestamp = "dt" +timestamp_format = "unix" + +[tags] + city = "/city/name" + country = "/city/country" + +[fields_int] + humidity = "main/humidity" + clouds = "clouds/all" + +[fields] + precipitation = "number(main/precipitation)" + wind_direction = "number(wind/deg)" + wind_speed = "number(wind/speed)" + temperature = "(number(main/temp) - 32.0)*(5.0 div 9.0)" diff --git a/plugins/parsers/xpath/testcases/openweathermap_xml.conf b/plugins/parsers/xpath/testcases/openweathermap_xml.conf new file mode 100644 index 0000000000000..57b63cebdc694 --- /dev/null +++ b/plugins/parsers/xpath/testcases/openweathermap_xml.conf @@ -0,0 +1,28 @@ +# Example for parsing openweathermap five-day-forecast data. 
+#
+# File:
+#   testcases/openweathermap_5d.xml xml
+#
+# Expected Output:
+#   weather,city=London,country=GB clouds=64i,humidity=96i,precipitation=5,temperature=16.89,wind_direction=253.5,wind_speed=4.9 1435654800000000000
+#   weather,city=London,country=GB clouds=44i,humidity=97i,precipitation=99,temperature=17.23,wind_direction=248.001,wind_speed=4.86 1435665600000000000
+#
+
+metric_name = "'weather'"
+metric_selection = "//forecast/*"
+timestamp = "@from"
+timestamp_format = "2006-01-02T15:04:05"
+
+[tags]
+  city = "/weatherdata/location/name"
+  country = "/weatherdata/location/country"
+
+[fields_int]
+  humidity = "humidity/@value"
+  clouds = "clouds/@all"
+
+[fields]
+  precipitation = "number(precipitation/@value)"
+  wind_direction = "number(windDirection/@deg)"
+  wind_speed = "number(windSpeed/@mps)"
+  temperature = "number(temperature/@value)"
diff --git a/plugins/parsers/xpath/testcases/tracker.msg b/plugins/parsers/xpath/testcases/tracker.msg
new file mode 100644
index 0000000000000..3120a4321ed15
--- /dev/null
+++ b/plugins/parsers/xpath/testcases/tracker.msg
@@ -0,0 +1 @@
+„£geo’Ë@BåsEËÀ^™ŽMîˆy¦device¨TrackerA¤infoƒ§quality­serial_number¬123abc456def£fixétimestampÎ`ÔV¨
\ No newline at end of file
diff --git a/plugins/parsers/xpath/testcases/tracker_msgpack.conf b/plugins/parsers/xpath/testcases/tracker_msgpack.conf
new file mode 100644
index 0000000000000..168ad2cc97e4f
--- /dev/null
+++ b/plugins/parsers/xpath/testcases/tracker_msgpack.conf
@@ -0,0 +1,24 @@
+# Example for parsing tracker data in MessagePack format.
+#
+# File:
+#   testcases/tracker.msg xpath_msgpack
+#
+# Expected Output:
+#   tracker,device=TrackerA,fixation=true serial="123abc456def",lat=37.78980863758897,lon=-122.39931057256935,quality=2i 1624528552000000000
+#
+
+metric_name = "'tracker'"
+timestamp = "timestamp"
+timestamp_format = "unix"
+
+[tags]
+  device = "device"
+  fixation = "info/fix"
+
+[fields_int]
+  quality = "info/quality"
+
+[fields]
+  serial = "info/serial_number"
+  lat = "number(/geo/*[1])"
+  lon = "number(/geo/*[2])"
diff --git a/plugins/parsers/xpath/xml_document.go b/plugins/parsers/xpath/xml_document.go
new file mode 100644
index 0000000000000..f2059b4c8333c
--- /dev/null
+++ b/plugins/parsers/xpath/xml_document.go
@@ -0,0 +1,65 @@
+package xpath
+
+import (
+	"strings"
+
+	"github.com/antchfx/xmlquery"
+	path "github.com/antchfx/xpath"
+)
+
+type xmlDocument struct{}
+
+func (d *xmlDocument) Parse(buf []byte) (dataNode, error) {
+	return xmlquery.Parse(strings.NewReader(string(buf)))
+}
+
+func (d *xmlDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) {
+	// If this panics it's a programming error as we changed the document type while processing
+	native, err := xmlquery.QueryAll(node.(*xmlquery.Node), expr)
+	if err != nil {
+		return nil, err
+	}
+
+	nodes := make([]dataNode, len(native))
+	for i, n := range native {
+		nodes[i] = n
+	}
+	return nodes, nil
+}
+
+func (d *xmlDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator {
+	// If this panics it's a programming error as we changed the document type while processing
+	return xmlquery.CreateXPathNavigator(node.(*xmlquery.Node))
+}
+
+func (d *xmlDocument) GetNodePath(node, relativeTo dataNode, sep string) string {
+	names := make([]string, 0)
+
+	// If these panic it's a programming error as we changed the document type while processing
+	nativeNode := node.(*xmlquery.Node)
+	nativeRelativeTo := relativeTo.(*xmlquery.Node)
+
+	// Climb up the tree and collect the node names
+	n := nativeNode.Parent
+	for n != nil && n != nativeRelativeTo {
+		names = append(names, n.Data)
+		n = n.Parent
+	}
+
+	if len(names) < 1 {
+		return ""
+	}
+
+	// Construct the node path from the collected names
+	nodepath := ""
+	for _, name := range names {
+		nodepath = name + sep + nodepath
+	}
+
+	return nodepath[:len(nodepath)-1]
+}
+
+func (d *xmlDocument) OutputXML(node dataNode) string {
+	native := node.(*xmlquery.Node)
+	return native.OutputXML(false)
+}
diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go
index c84ee81110ee5..faf6de1e25661 100644
--- a/plugins/processors/all/all.go
+++ b/plugins/processors/all/all.go
@@ -1,6 +1,8 @@
 package all
 
 import (
+	// Blank imports for plugins to register themselves
+	_ "github.com/influxdata/telegraf/plugins/processors/aws/ec2"
 	_ "github.com/influxdata/telegraf/plugins/processors/clone"
 	_ "github.com/influxdata/telegraf/plugins/processors/converter"
 	_ "github.com/influxdata/telegraf/plugins/processors/date"
diff --git a/plugins/processors/aws/ec2/README.md b/plugins/processors/aws/ec2/README.md
new file mode 100644
index 0000000000000..c04e26c36f861
--- /dev/null
+++ b/plugins/processors/aws/ec2/README.md
@@ -0,0 +1,67 @@
+# AWS EC2 Metadata Processor Plugin
+
+The AWS EC2 Metadata processor plugin appends metadata gathered from [AWS IMDS][]
+to metrics associated with EC2 instances.
+
+[AWS IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+
+## Configuration
+
+```toml
+[[processors.aws_ec2]]
+  ## Available tags:
+  ## * accountId
+  ## * architecture
+  ## * availabilityZone
+  ## * billingProducts
+  ## * imageId
+  ## * instanceId
+  ## * instanceType
+  ## * kernelId
+  ## * pendingTime
+  ## * privateIp
+  ## * ramdiskId
+  ## * region
+  ## * version
+  imds_tags = []
+
+  ## EC2 instance tags retrieved with DescribeTags action.
+  ## If a tag is empty upon retrieval it is omitted when tagging metrics.
+  ## Note that in order for this to work, the role attached to the EC2 instance
+  ## or the AWS credentials available from the environment must have a policy
+  ## attached that allows ec2:DescribeTags.
+  ##
+  ## For more information see:
+  ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+  ec2_tags = []
+
+  ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
+  timeout = "10s"
+
+  ## ordered controls whether metrics must leave this plugin in the same order
+  ## in which they arrived. If false, metrics whose lookups hit cached results
+  ## pass through immediately instead of waiting on slower lookups. Set this to
+  ## true if you depend on metric order staying the same; keeping metrics
+  ## ordered may be slightly slower.
+  ordered = false
+
+  ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+  ## at the same time.
+  ## It's probably best to keep this number fairly low.
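+  ## Lookups beyond this limit are queued and run as earlier calls complete.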
+  max_parallel_calls = 10
+```
+
+## Example
+
+Append `accountId` and `instanceId` to metric tags:
+
+```toml
+[[processors.aws_ec2]]
+  imds_tags = [ "accountId", "instanceId" ]
+```
+
+```diff
+- cpu,hostname=localhost time_idle=42
++ cpu,hostname=localhost,accountId=123456789,instanceId=i-123456789123 time_idle=42
+```
diff --git a/plugins/processors/aws/ec2/ec2.go b/plugins/processors/aws/ec2/ec2.go
new file mode 100644
index 0000000000000..088ec09c83f5f
--- /dev/null
+++ b/plugins/processors/aws/ec2/ec2.go
@@ -0,0 +1,309 @@
+package ec2
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsconfig "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+	"github.com/aws/smithy-go"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/common/parallel"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+type AwsEc2Processor struct {
+	ImdsTags         []string        `toml:"imds_tags"`
+	EC2Tags          []string        `toml:"ec2_tags"`
+	Timeout          config.Duration `toml:"timeout"`
+	Ordered          bool            `toml:"ordered"`
+	MaxParallelCalls int             `toml:"max_parallel_calls"`
+
+	Log        telegraf.Logger     `toml:"-"`
+	imdsClient *imds.Client        `toml:"-"`
+	imdsTags   map[string]struct{} `toml:"-"`
+	ec2Client  *ec2.Client         `toml:"-"`
+	parallel   parallel.Parallel   `toml:"-"`
+	instanceID string              `toml:"-"`
+}
+
+const sampleConfig = `
+  ## Instance identity document tags to attach to metrics.
+  ## For more information see:
+  ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+  ##
+  ## Available tags:
+  ## * accountId
+  ## * architecture
+  ## * availabilityZone
+  ## * billingProducts
+  ## * imageId
+  ## * instanceId
+  ## * instanceType
+  ## * kernelId
+  ## * pendingTime
+  ## * privateIp
+  ## * ramdiskId
+  ## * region
+  ## * version
+  imds_tags = []
+
+  ## EC2 instance tags retrieved with DescribeTags action.
+  ## If a tag is empty upon retrieval it is omitted when tagging metrics.
+  ## Note that in order for this to work, the role attached to the EC2 instance
+  ## or the AWS credentials available from the environment must have a policy
+  ## attached that allows ec2:DescribeTags.
+  ##
+  ## For more information see:
+  ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+  ec2_tags = []
+
+  ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
+  timeout = "10s"
+
+  ## ordered controls whether metrics must leave this plugin in the same order
+  ## in which they arrived. If false, metrics whose lookups hit cached results
+  ## pass through immediately instead of waiting on slower lookups. Set this to
+  ## true if you depend on metric order staying the same; keeping metrics
+  ## ordered may be slightly slower.
+  ordered = false
+
+  ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+  ## at the same time.
+  ## It's probably best to keep this number fairly low.
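+  ## Lookups beyond this limit are queued and run as earlier calls complete.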
+  max_parallel_calls = 10
+`
+
+const (
+	DefaultMaxOrderedQueueSize = 10_000
+	DefaultMaxParallelCalls    = 10
+	DefaultTimeout             = 10 * time.Second
+)
+
+var allowedImdsTags = map[string]struct{}{
+	"accountId":        {},
+	"architecture":     {},
+	"availabilityZone": {},
+	"billingProducts":  {},
+	"imageId":          {},
+	"instanceId":       {},
+	"instanceType":     {},
+	"kernelId":         {},
+	"pendingTime":      {},
+	"privateIp":        {},
+	"ramdiskId":        {},
+	"region":           {},
+	"version":          {},
+}
+
+func (r *AwsEc2Processor) SampleConfig() string {
+	return sampleConfig
+}
+
+func (r *AwsEc2Processor) Description() string {
+	return "Attach AWS EC2 metadata to metrics"
+}
+
+func (r *AwsEc2Processor) Add(metric telegraf.Metric, _ telegraf.Accumulator) error {
+	r.parallel.Enqueue(metric)
+	return nil
+}
+
+func (r *AwsEc2Processor) Init() error {
+	r.Log.Debug("Initializing AWS EC2 Processor")
+	if len(r.EC2Tags) == 0 && len(r.ImdsTags) == 0 {
+		return errors.New("no tags specified in configuration")
+	}
+
+	for _, tag := range r.ImdsTags {
+		if len(tag) == 0 || !isImdsTagAllowed(tag) {
+			return fmt.Errorf("not allowed metadata tag specified in configuration: %s", tag)
+		}
+		r.imdsTags[tag] = struct{}{}
+	}
+	if len(r.imdsTags) == 0 && len(r.EC2Tags) == 0 {
+		return errors.New("no allowed metadata tags specified in configuration")
+	}
+
+	return nil
+}
+
+func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error {
+	ctx := context.Background()
+	cfg, err := awsconfig.LoadDefaultConfig(ctx)
+	if err != nil {
+		return fmt.Errorf("failed loading default AWS config: %w", err)
+	}
+	r.imdsClient = imds.NewFromConfig(cfg)
+
+	iido, err := r.imdsClient.GetInstanceIdentityDocument(
+		ctx,
+		&imds.GetInstanceIdentityDocumentInput{},
+	)
+	if err != nil {
+		return fmt.Errorf("failed getting instance identity document: %w", err)
+	}
+
+	r.instanceID = iido.InstanceID
+
+	if len(r.EC2Tags) > 0 {
+		// Add region to AWS config when creating EC2 service client since it's required.
+		cfg.Region = iido.Region
+
+		r.ec2Client = ec2.NewFromConfig(cfg)
+
+		// Check if the instance is allowed to call DescribeTags.
+		_, err = r.ec2Client.DescribeTags(ctx, &ec2.DescribeTagsInput{
+			DryRun: true,
+		})
+		var ae smithy.APIError
+		if errors.As(err, &ae) {
+			if ae.ErrorCode() != "DryRunOperation" {
+				return fmt.Errorf("instance doesn't have permissions to call DescribeTags: %w", err)
+			}
+		} else if err != nil {
+			return fmt.Errorf("error calling DescribeTags: %w", err)
+		}
+	}
+
+	if r.Ordered {
+		r.parallel = parallel.NewOrdered(acc, r.asyncAdd, DefaultMaxOrderedQueueSize, r.MaxParallelCalls)
+	} else {
+		r.parallel = parallel.NewUnordered(acc, r.asyncAdd, r.MaxParallelCalls)
+	}
+
+	return nil
+}
+
+func (r *AwsEc2Processor) Stop() error {
+	if r.parallel == nil {
+		return errors.New("trying to stop unstarted AWS EC2 processor")
+	}
+	r.parallel.Stop()
+	return nil
+}
+
+func (r *AwsEc2Processor) asyncAdd(metric telegraf.Metric) []telegraf.Metric {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(r.Timeout))
+	defer cancel()
+
+	// Add IMDS Instance Identity Document tags.
+	if len(r.imdsTags) > 0 {
+		iido, err := r.imdsClient.GetInstanceIdentityDocument(
+			ctx,
+			&imds.GetInstanceIdentityDocumentInput{},
+		)
+		if err != nil {
+			r.Log.Errorf("Error when calling GetInstanceIdentityDocument: %v", err)
+			return []telegraf.Metric{metric}
+		}
+
+		for tag := range r.imdsTags {
+			if v := getTagFromInstanceIdentityDocument(iido, tag); v != "" {
+				metric.AddTag(tag, v)
+			}
+		}
+	}
+
+	// Add EC2 instance tags.
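+	// Like the IMDS lookup above, DescribeTags runs once per metric; responses
+	// are not cached, so a high metric rate translates directly into EC2 API calls.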
+ if len(r.EC2Tags) > 0 { + dto, err := r.ec2Client.DescribeTags(ctx, &ec2.DescribeTagsInput{ + Filters: createFilterFromTags(r.instanceID, r.EC2Tags), + }) + if err != nil { + r.Log.Errorf("Error during EC2 DescribeTags: %v", err) + return []telegraf.Metric{metric} + } + + for _, tag := range r.EC2Tags { + if v := getTagFromDescribeTags(dto, tag); v != "" { + metric.AddTag(tag, v) + } + } + } + + return []telegraf.Metric{metric} +} + +func init() { + processors.AddStreaming("aws_ec2", func() telegraf.StreamingProcessor { + return newAwsEc2Processor() + }) +} + +func newAwsEc2Processor() *AwsEc2Processor { + return &AwsEc2Processor{ + MaxParallelCalls: DefaultMaxParallelCalls, + Timeout: config.Duration(DefaultTimeout), + imdsTags: make(map[string]struct{}), + } +} + +func createFilterFromTags(instanceID string, tagNames []string) []types.Filter { + return []types.Filter{ + { + Name: aws.String("resource-id"), + Values: []string{instanceID}, + }, + { + Name: aws.String("key"), + Values: tagNames, + }, + } +} + +func getTagFromDescribeTags(o *ec2.DescribeTagsOutput, tag string) string { + for _, t := range o.Tags { + if *t.Key == tag { + return *t.Value + } + } + return "" +} + +func getTagFromInstanceIdentityDocument(o *imds.GetInstanceIdentityDocumentOutput, tag string) string { + switch tag { + case "accountId": + return o.AccountID + case "architecture": + return o.Architecture + case "availabilityZone": + return o.AvailabilityZone + case "billingProducts": + return strings.Join(o.BillingProducts, ",") + case "imageId": + return o.ImageID + case "instanceId": + return o.InstanceID + case "instanceType": + return o.InstanceType + case "kernelId": + return o.KernelID + case "pendingTime": + return o.PendingTime.String() + case "privateIp": + return o.PrivateIP + case "ramdiskId": + return o.RamdiskID + case "region": + return o.Region + case "version": + return o.Version + default: + return "" + } +} + +func isImdsTagAllowed(tag string) bool { + _, ok := allowedImdsTags[tag] + return ok +} diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go new file mode 100644 index 0000000000000..88fd661eb5c07 --- /dev/null +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -0,0 +1,57 @@ +package ec2 + +import ( + "testing" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestBasicStartup(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{"accountId", "instanceId"} + acc := &testutil.Accumulator{} + require.NoError(t, p.Init()) + + require.Len(t, acc.GetTelegrafMetrics(), 0) + require.Len(t, acc.Errors, 0) +} + +func TestBasicStartupWithEC2Tags(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{"accountId", "instanceId"} + p.EC2Tags = []string{"Name"} + acc := &testutil.Accumulator{} + require.NoError(t, p.Init()) + + require.Len(t, acc.GetTelegrafMetrics(), 0) + require.Len(t, acc.Errors, 0) +} + +func TestBasicInitNoTagsReturnAnError(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{} + err := p.Init() + require.Error(t, err) +} + +func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{"dummy", "qwerty"} + err := p.Init() + require.Error(t, err) +} + +func TestLoadingConfig(t *testing.T) { + confFile := []byte("[[processors.aws_ec2]]" + "\n" + 
sampleConfig)
+	c := config.NewConfig()
+	err := c.LoadConfigData(confFile)
+	require.NoError(t, err)
+
+	require.Len(t, c.Processors, 1)
+}
diff --git a/plugins/processors/clone/clone_test.go b/plugins/processors/clone/clone_test.go
index f1b8dc5b29c03..20bec925e7acb 100644
--- a/plugins/processors/clone/clone_test.go
+++ b/plugins/processors/clone/clone_test.go
@@ -10,12 +10,12 @@ import (
 )
 
 func createTestMetric() telegraf.Metric {
-	metric, _ := metric.New("m1",
+	m := metric.New("m1",
 		map[string]string{"metric_tag": "from_metric"},
 		map[string]interface{}{"value": int64(1)},
 		time.Now(),
 	)
-	return metric
+	return m
 }
 
 func calculateProcessedTags(processor Clone, metric telegraf.Metric) map[string]string {
diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md
index d916c87643bee..46a2e2ec6390a 100644
--- a/plugins/processors/converter/README.md
+++ b/plugins/processors/converter/README.md
@@ -9,6 +9,8 @@ Values that cannot be converted are dropped.
 uniquely identifiable. Fields with the same series key (measurement + tags)
 will overwrite one another.
 
+**Note on converting large string numbers to numeric types:** The largest numeric type this plugin supports is `float64`; a string describing a number beyond what `float64` can represent exactly is converted with a loss of precision. For example, the string `"9007199254740993"` (2^53 + 1) converts to `9007199254740992`, because a `float64` mantissa carries only 53 bits.
+
 ### Configuration
 ```toml
 # Convert values to another metric value type
diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go
index 55a2a2d0965dc..fd56cc4d9a6a8 100644
--- a/plugins/processors/converter/converter.go
+++ b/plugins/processors/converter/converter.go
@@ -1,9 +1,12 @@
 package converter
 
 import (
+	"errors"
 	"fmt"
 	"math"
+	"math/big"
 	"strconv"
+	"strings"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/filter"
@@ -349,9 +352,8 @@ func toInteger(v interface{}) (int64, bool) {
 	case uint64:
 		if value <= uint64(math.MaxInt64) {
 			return int64(value), true
-		} else {
-			return math.MaxInt64, true
 		}
+		return math.MaxInt64, true
 	case float64:
 		if value < float64(math.MinInt64) {
 			return math.MinInt64, true
@@ -363,17 +365,25 @@ func toInteger(v interface{}) (int64, bool) {
 	case bool:
 		if value {
 			return 1, true
-		} else {
-			return 0, true
 		}
+		return 0, true
 	case string:
 		result, err := strconv.ParseInt(value, 0, 64)
 		if err != nil {
-			result, err := strconv.ParseFloat(value, 64)
+			var result float64
+			var err error
+
+			if isHexadecimal(value) {
+				result, err = parseHexadecimal(value)
+			} else {
+				result, err = strconv.ParseFloat(value, 64)
+			}
+
 			if err != nil {
 				return 0, false
 			}
+
 			return toInteger(result)
 		}
 		return result, true
@@ -388,9 +398,8 @@ func toUnsigned(v interface{}) (uint64, bool) {
 	case int64:
 		if value < 0 {
 			return 0, true
-		} else {
-			return uint64(value), true
 		}
+		return uint64(value), true
 	case float64:
 		if value < 0.0 {
 			return 0, true
@@ -402,17 +411,25 @@ func toUnsigned(v interface{}) (uint64, bool) {
 	case bool:
 		if value {
 			return 1, true
-		} else {
-			return 0, true
 		}
+		return 0, true
 	case string:
 		result, err := strconv.ParseUint(value, 0, 64)
 		if err != nil {
-			result, err := strconv.ParseFloat(value, 64)
+			var result float64
+			var err error
+
+			if isHexadecimal(value) {
+				result, err = parseHexadecimal(value)
+			} else {
+				result, err = strconv.ParseFloat(value, 64)
+			}
+
 			if err != nil {
 				return 0, false
 			}
+
 			return toUnsigned(result)
 		}
 		return result, true
@@ -431,10 +448,14 @@ func toFloat(v interface{}) (float64, bool) {
 	case bool:
 		if value {
 			return 1.0, true
-		} else {
-			return 0.0, true
 		}
+		return 0.0, true
 	case string:
+		if isHexadecimal(value) {
+			result, err := parseHexadecimal(value)
+			return result, err == nil
+		}
+
 		result, err := strconv.ParseFloat(value, 64)
 		return result, err == nil
 	}
@@ -457,6 +478,24 @@ func toString(v interface{}) (string, bool) {
 	return "", false
 }
 
+// parseHexadecimal parses the string through math/big so that values wider
+// than 64 bits still convert; the final conversion to float64 may round.
+func parseHexadecimal(value string) (float64, error) {
+	i := new(big.Int)
+
+	_, success := i.SetString(value, 0)
+	if !success {
+		return 0, errors.New("unable to parse string to big int")
+	}
+
+	f := new(big.Float).SetInt(i)
+	result, _ := f.Float64()
+
+	return result, nil
+}
+
+// isHexadecimal reports whether the second character of the string is 'x' or
+// 'X', as in "0x...".
+func isHexadecimal(value string) bool {
+	return len(value) >= 3 && strings.ToLower(value)[1] == 'x'
+}
+
 func init() {
 	processors.Add("converter", func() telegraf.Processor {
 		return &Converter{}
diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go
index 0a8200dbef449..b9e30c589a88d 100644
--- a/plugins/processors/converter/converter_test.go
+++ b/plugins/processors/converter/converter_test.go
@@ -432,6 +432,38 @@ func TestConverter(t *testing.T) {
 			),
 		},
 	},
+	{
+		name: "from string field hexadecimal",
+		converter: &Converter{
+			Fields: &Conversion{
+				Integer:  []string{"a"},
+				Unsigned: []string{"b"},
+				Float:    []string{"c"},
+			},
+		},
+		input: testutil.MustMetric(
+			"cpu",
+			map[string]string{},
+			map[string]interface{}{
+				"a": "0x11826c",
+				"b": "0x11826c",
+				"c": "0x2139d19bb1c580ebe0",
+			},
+			time.Unix(0, 0),
+		),
+		expected: []telegraf.Metric{
+			testutil.MustMetric(
+				"cpu",
+				map[string]string{},
+				map[string]interface{}{
+					"a": int64(1147500),
+					"b": uint64(1147500),
+					"c": float64(612908836750534700000),
+				},
+				time.Unix(0, 0),
+			),
+		},
+	},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go
index ef8609811c1f7..b1705826dc912 100644
--- a/plugins/processors/date/date.go
+++ b/plugins/processors/date/date.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/plugins/processors"
 )
 
@@ -37,11 +37,11 @@ const sampleConfig = `
 const defaultTimezone = "UTC"
 
 type Date struct {
-	TagKey     string            `toml:"tag_key"`
-	FieldKey   string            `toml:"field_key"`
-	DateFormat string            `toml:"date_format"`
-	DateOffset internal.Duration `toml:"date_offset"`
-	Timezone   string            `toml:"timezone"`
+	TagKey     string          `toml:"tag_key"`
+	FieldKey   string          `toml:"field_key"`
+	DateFormat string          `toml:"date_format"`
+	DateOffset config.Duration `toml:"date_offset"`
+	Timezone   string          `toml:"timezone"`
 
 	location *time.Location
 }
@@ -70,7 +70,7 @@ func (d *Date) Init() error {
 
 func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric {
 	for _, point := range in {
-		tm := point.Time().In(d.location).Add(d.DateOffset.Duration)
+		tm := point.Time().In(d.location).Add(time.Duration(d.DateOffset))
 		if len(d.TagKey) > 0 {
 			point.AddTag(d.TagKey, tm.Format(d.DateFormat))
 		} else if len(d.FieldKey) > 0 {
diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go
index 42e094c939c17..aa7efc64edbff 100644
--- a/plugins/processors/date/date_test.go
+++ b/plugins/processors/date/date_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert" @@ -19,7 +19,7 @@ func MustMetric(name string, tags map[string]string, fields map[string]interface if fields == nil { fields = map[string]interface{}{} } - m, _ := metric.New(name, tags, fields, metricTime) + m := metric.New(name, tags, fields, metricTime) return m } @@ -30,7 +30,6 @@ func TestTagAndField(t *testing.T) { } err := dateFormatTagAndField.Init() require.Error(t, err) - } func TestNoOutputSpecified(t *testing.T) { @@ -166,7 +165,7 @@ func TestDateOffset(t *testing.T) { plugin := &Date{ TagKey: "hour", DateFormat: "15", - DateOffset: internal.Duration{Duration: 2 * time.Hour}, + DateOffset: config.Duration(2 * time.Hour), } err := plugin.Init() diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index 3dd7516a696c2..1ffe183256a1c 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/processors" ) @@ -14,7 +14,7 @@ var sampleConfig = ` ` type Dedup struct { - DedupInterval internal.Duration `toml:"dedup_interval"` + DedupInterval config.Duration `toml:"dedup_interval"` FlushTime time.Time Cache map[uint64]telegraf.Metric } @@ -36,13 +36,13 @@ func remove(slice []telegraf.Metric, i int) []telegraf.Metric { // Remove expired items from cache func (d *Dedup) cleanup() { // No need to cleanup cache too often. Lets save some CPU - if time.Since(d.FlushTime) < d.DedupInterval.Duration { + if time.Since(d.FlushTime) < time.Duration(d.DedupInterval) { return } d.FlushTime = time.Now() - keep := make(map[uint64]telegraf.Metric, 0) + keep := make(map[uint64]telegraf.Metric) for id, metric := range d.Cache { - if time.Since(metric.Time()) < d.DedupInterval.Duration { + if time.Since(metric.Time()) < time.Duration(d.DedupInterval) { keep[id] = metric } } @@ -68,7 +68,7 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { } // If cache item has expired then refresh it - if time.Since(m.Time()) >= d.DedupInterval.Duration { + if time.Since(m.Time()) >= time.Duration(d.DedupInterval) { d.save(metric, id) continue } @@ -120,7 +120,7 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { func init() { processors.Add("dedup", func() telegraf.Processor { return &Dedup{ - DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + DedupInterval: config.Duration(10 * time.Minute), FlushTime: time.Now(), Cache: make(map[uint64]telegraf.Metric), } diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go index cae2bf1a529ed..4f3d109345b32 100644 --- a/plugins/processors/dedup/dedup_test.go +++ b/plugins/processors/dedup/dedup_test.go @@ -7,12 +7,14 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" ) -func createMetric(name string, value int64, when time.Time) telegraf.Metric { - m, _ := metric.New(name, +const metricName = "m1" + +func createMetric(value int64, when time.Time) telegraf.Metric { + m := metric.New(metricName, map[string]string{"tag": "tag_value"}, map[string]interface{}{"value": value}, when, @@ -22,7 +24,7 @@ func createMetric(name string, value int64, when time.Time) telegraf.Metric { func createDedup(initTime time.Time) Dedup { return Dedup{ - DedupInterval: 
internal.Duration{Duration: 10 * time.Minute}, + DedupInterval: config.Duration(10 * time.Minute), FlushTime: initTime, Cache: make(map[uint64]telegraf.Metric), } @@ -70,7 +72,7 @@ func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf. // target is not empty require.NotEqual(t, 0, len(target)) // target has metric with proper name - require.Equal(t, "m1", target[0].Name()) + require.Equal(t, metricName, target[0].Name()) // target metric has proper field tValue, present := target[0].GetField("value") require.True(t, present) @@ -80,14 +82,14 @@ func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf. require.Equal(t, target[0].Time(), source.Time()) } -func assertMetricSuppressed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) { +func assertMetricSuppressed(t *testing.T, target []telegraf.Metric) { // target is empty require.Equal(t, 0, len(target)) } func TestProcRetainsMetric(t *testing.T) { deduplicate := createDedup(time.Now()) - source := createMetric("m1", 1, time.Now()) + source := createMetric(1, time.Now()) target := deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -97,21 +99,21 @@ func TestProcRetainsMetric(t *testing.T) { func TestSuppressRepeatedValue(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past - source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + source := createMetric(1, time.Now().Add(-1*time.Second)) target := deduplicate.Apply(source) - source = createMetric("m1", 1, time.Now()) + source = createMetric(1, time.Now()) target = deduplicate.Apply(source) assertCacheHit(t, &deduplicate, source) - assertMetricSuppressed(t, target, source) + assertMetricSuppressed(t, target) } func TestPassUpdatedValue(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past - source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + source := createMetric(1, time.Now().Add(-1*time.Second)) target := deduplicate.Apply(source) - source = createMetric("m1", 2, time.Now()) + source = createMetric(2, time.Now()) target = deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -121,9 +123,9 @@ func TestPassUpdatedValue(t *testing.T) { func TestPassAfterCacheExpire(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past - source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + source := createMetric(1, time.Now().Add(-1*time.Hour)) target := deduplicate.Apply(source) - source = createMetric("m1", 1, time.Now()) + source = createMetric(1, time.Now()) target = deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -133,12 +135,12 @@ func TestPassAfterCacheExpire(t *testing.T) { func TestCacheRetainsMetrics(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past 3sec - source := createMetric("m1", 1, time.Now().Add(-3*time.Hour)) + source := createMetric(1, time.Now().Add(-3*time.Hour)) deduplicate.Apply(source) // Create metric in the past 2sec - source = createMetric("m1", 1, time.Now().Add(-2*time.Hour)) + source = createMetric(1, time.Now().Add(-2*time.Hour)) deduplicate.Apply(source) - source = createMetric("m1", 1, time.Now()) + source = createMetric(1, time.Now()) deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -148,7 +150,7 @@ func TestCacheShrink(t *testing.T) { // Time offset is more than 2 * DedupInterval deduplicate := createDedup(time.Now().Add(-2 * time.Hour)) // Time offset is more than 1 * 
DedupInterval - source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + source := createMetric(1, time.Now().Add(-1*time.Hour)) deduplicate.Apply(source) require.Equal(t, 0, len(deduplicate.Cache)) @@ -160,7 +162,7 @@ func TestSameTimestamp(t *testing.T) { var in telegraf.Metric var out []telegraf.Metric - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"foo": 1}, // field now, @@ -168,7 +170,7 @@ func TestSameTimestamp(t *testing.T) { out = dedup.Apply(in) require.Equal(t, []telegraf.Metric{in}, out) // pass - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"bar": 1}, // different field now, @@ -176,7 +178,7 @@ func TestSameTimestamp(t *testing.T) { out = dedup.Apply(in) require.Equal(t, []telegraf.Metric{in}, out) // pass - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"bar": 2}, // same field different value now, @@ -184,7 +186,7 @@ func TestSameTimestamp(t *testing.T) { out = dedup.Apply(in) require.Equal(t, []telegraf.Metric{in}, out) // pass - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"bar": 2}, // same field same value now, diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 72a0556252902..0aecaaa430474 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -2,7 +2,7 @@ The Enum Processor allows the configuration of value mappings for metric tags or fields. The main use-case for this is to rewrite status codes such as _red_, _amber_ and -_green_ by numeric values such as 0, 1, 2. The plugin supports string, int and bool +_green_ by numeric values such as 0, 1, 2. The plugin supports string, int, float64 and bool types for the field values. Multiple tags or fields can be configured with separate value mappings for each. Default mapping values can be configured to be used for all values, which are not contained in the value_mappings. The @@ -14,10 +14,10 @@ source tag or field is overwritten. ```toml [[processors.enum]] [[processors.enum.mapping]] - ## Name of the field to map + ## Name of the field to map. Globs accepted. field = "status" - ## Name of the tag to map + ## Name of the tag to map. Globs accepted. # tag = "status" ## Destination tag or field to be used for the mapped value. By default the diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index a96e7d5095bcf..f31987775b6a5 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -5,15 +5,16 @@ import ( "strconv" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/processors" ) var sampleConfig = ` [[processors.enum.mapping]] - ## Name of the field to map + ## Name of the field to map. Globs accepted. field = "status" - ## Name of the tag to map + ## Name of the tag to map. Globs accepted. # tag = "status" ## Destination tag or field to be used for the mapped value. 
By default the
@@ -34,6 +35,9 @@ var sampleConfig = `
 
 type EnumMapper struct {
 	Mappings []Mapping `toml:"mapping"`
+
+	FieldFilters map[string]filter.Filter
+	TagFilters   map[string]filter.Filter
 }
 
 type Mapping struct {
@@ -44,6 +48,29 @@ type Mapping struct {
 	ValueMappings map[string]interface{}
 }
 
+func (mapper *EnumMapper) Init() error {
+	mapper.FieldFilters = make(map[string]filter.Filter)
+	mapper.TagFilters = make(map[string]filter.Filter)
+	for _, mapping := range mapper.Mappings {
+		if mapping.Field != "" {
+			fieldFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Field}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to create new field filter: %w", err)
+			}
+			mapper.FieldFilters[mapping.Field] = fieldFilter
+		}
+		if mapping.Tag != "" {
+			tagFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Tag}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to create new tag filter: %w", err)
+			}
+			mapper.TagFilters[mapping.Tag] = tagFilter
+		}
+	}
+
+	return nil
+}
+
 func (mapper *EnumMapper) SampleConfig() string {
 	return sampleConfig
 }
@@ -60,30 +87,56 @@ func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric {
 }
 
 func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric {
+	newFields := make(map[string]interface{})
+	newTags := make(map[string]string)
+
 	for _, mapping := range mapper.Mappings {
 		if mapping.Field != "" {
-			if originalValue, isPresent := metric.GetField(mapping.Field); isPresent {
-				if adjustedValue, isString := adjustValue(originalValue).(string); isString {
-					if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent {
-						writeField(metric, mapping.getDestination(), mappedValue)
-					}
+			mapper.fieldMapping(metric, mapping, newFields)
+		}
+		if mapping.Tag != "" {
+			mapper.tagMapping(metric, mapping, newTags)
+		}
+	}
+
+	for k, v := range newFields {
+		writeField(metric, k, v)
+	}
+
+	for k, v := range newTags {
+		writeTag(metric, k, v)
+	}
+
+	return metric
+}
+
+func (mapper *EnumMapper) fieldMapping(metric telegraf.Metric, mapping Mapping, newFields map[string]interface{}) {
+	fields := metric.FieldList()
+	for _, f := range fields {
+		if mapper.FieldFilters[mapping.Field].Match(f.Key) {
+			if adjustedValue, isString := adjustValue(f.Value).(string); isString {
+				if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent {
+					newFields[mapping.getDestination(f.Key)] = mappedValue
 				}
 			}
 		}
-		if mapping.Tag != "" {
-			if originalValue, isPresent := metric.GetTag(mapping.Tag); isPresent {
-				if mappedValue, isMappedValuePresent := mapping.mapValue(originalValue); isMappedValuePresent {
-					switch val := mappedValue.(type) {
-					case string:
-						writeTag(metric, mapping.getDestinationTag(), val)
-					default:
-						writeTag(metric, mapping.getDestinationTag(), fmt.Sprintf("%v", val))
-					}
+	}
+}
+
+func (mapper *EnumMapper) tagMapping(metric telegraf.Metric, mapping Mapping, newTags map[string]string) {
+	tags := metric.TagList()
+	for _, t := range tags {
+		if mapper.TagFilters[mapping.Tag].Match(t.Key) {
+			if mappedValue, isMappedValuePresent := mapping.mapValue(t.Value); isMappedValuePresent {
+				switch val := mappedValue.(type) {
+				case string:
+					newTags[mapping.getDestination(t.Key)] = val
+				default:
+					newTags[mapping.getDestination(t.Key)] = fmt.Sprintf("%v", val)
 				}
 			}
 		}
 	}
-	return metric
 }
 
 func adjustValue(in interface{}) interface{} {
@@ -92,6 +145,8 @@
 		return strconv.FormatBool(val)
 	case int64:
 		return 
strconv.FormatInt(val, 10) + case float64: + return strconv.FormatFloat(val, 'f', -1, 64) case uint64: return strconv.FormatUint(val, 10) default: @@ -100,7 +155,7 @@ func adjustValue(in interface{}) interface{} { } func (mapping *Mapping) mapValue(original string) (interface{}, bool) { - if mapped, found := mapping.ValueMappings[original]; found == true { + if mapped, found := mapping.ValueMappings[original]; found { return mapped, true } if mapping.Default != nil { @@ -109,18 +164,11 @@ func (mapping *Mapping) mapValue(original string) (interface{}, bool) { return original, false } -func (mapping *Mapping) getDestination() string { - if mapping.Dest != "" { - return mapping.Dest - } - return mapping.Field -} - -func (mapping *Mapping) getDestinationTag() string { +func (mapping *Mapping) getDestination(defaultDest string) string { if mapping.Dest != "" { return mapping.Dest } - return mapping.Tag + return defaultDest } func writeField(metric telegraf.Metric, name string, value interface{}) { diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index de13aad156f5c..53603ae0153c7 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -7,21 +7,26 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func createTestMetric() telegraf.Metric { - metric, _ := metric.New("m1", - map[string]string{"tag": "tag_value"}, + m := metric.New("m1", + map[string]string{ + "tag": "tag_value", + "duplicate_tag": "tag_value", + }, map[string]interface{}{ - "string_value": "test", - "int_value": int(200), - "uint_value": uint(500), - "float_value": float64(3.14), - "true_value": true, + "string_value": "test", + "duplicate_string_value": "test", + "int_value": int(200), + "uint_value": uint(500), + "float_value": float64(3.14), + "true_value": true, }, time.Now(), ) - return metric + return m } func calculateProcessedValues(mapper EnumMapper, metric telegraf.Metric) map[string]interface{} { @@ -48,6 +53,8 @@ func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[str func TestRetainsMetric(t *testing.T) { mapper := EnumMapper{} + err := mapper.Init() + require.Nil(t, err) source := createTestMetric() target := mapper.Apply(source)[0] @@ -56,6 +63,7 @@ func TestRetainsMetric(t *testing.T) { assertFieldValue(t, "test", "string_value", fields) assertFieldValue(t, 200, "int_value", fields) assertFieldValue(t, 500, "uint_value", fields) + assertFieldValue(t, float64(3.14), "float_value", fields) assertFieldValue(t, true, "true_value", fields) assert.Equal(t, "m1", target.Name()) assert.Equal(t, source.Tags(), target.Tags()) @@ -64,20 +72,13 @@ func TestRetainsMetric(t *testing.T) { func TestMapsSingleStringValueTag(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: map[string]interface{}{"tag_value": "valuable"}}}} - + err := mapper.Init() + require.Nil(t, err) tags := calculateProcessedTags(mapper, createTestMetric()) assertTagValue(t, "valuable", "tag", tags) } -func TestNoFailureOnMappingsOnNonSupportedValuedFields(t *testing.T) { - mapper := EnumMapper{Mappings: []Mapping{{Field: "float_value", ValueMappings: map[string]interface{}{"3.14": "pi"}}}} - - fields := calculateProcessedValues(mapper, createTestMetric()) - - assertFieldValue(t, float64(3.14), "float_value", fields) -} - func TestMappings(t *testing.T) { mappings := []map[string][]interface{}{ { @@ -104,21 
+105,30 @@ func TestMappings(t *testing.T) { "mapped_value": []interface{}{"internal_error", 1, false, false, false, false}, "expected_value": []interface{}{"internal_error", 1, false, 500, 500, 500}, }, + { + "field_name": []interface{}{"float_value"}, + "target_value": []interface{}{"3.14", "3.14", "3.14", "3.14", "not_float", "5"}, + "mapped_value": []interface{}{"pi", 1, false, float64(100.2), float64(3.14), "pi"}, + "expected_value": []interface{}{"pi", 1, false, float64(100.2), float64(3.14), float64(3.14)}, + }, } for _, mapping := range mappings { - field_name := mapping["field_name"][0].(string) + fieldName := mapping["field_name"][0].(string) for index := range mapping["target_value"] { - mapper := EnumMapper{Mappings: []Mapping{{Field: field_name, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: fieldName, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} + err := mapper.Init() + assert.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) - assertFieldValue(t, mapping["expected_value"][index], field_name, fields) + assertFieldValue(t, mapping["expected_value"][index], fieldName, fields) } } } func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, 42, "string_value", fields) @@ -126,7 +136,8 @@ func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, 1, "string_value", fields) @@ -134,7 +145,8 @@ func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) @@ -142,7 +154,8 @@ func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { func TestWritesToDestination(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) @@ -152,10 +165,30 @@ func TestWritesToDestination(t *testing.T) { func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) { field := "string_code" mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: field, ValueMappings: map[string]interface{}{"other": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) _, present := fields[field] assert.False(t, present, "value of 
field '"+field+"' was present") } + +func TestFieldGlobMatching(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Field: "*", ValueMappings: map[string]interface{}{"test": "glob"}}}} + err := mapper.Init() + require.Nil(t, err) + fields := calculateProcessedValues(mapper, createTestMetric()) + + assertFieldValue(t, "glob", "string_value", fields) + assertFieldValue(t, "glob", "duplicate_string_value", fields) +} + +func TestTagGlobMatching(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Tag: "*", ValueMappings: map[string]interface{}{"tag_value": "glob"}}}} + err := mapper.Init() + require.Nil(t, err) + tags := calculateProcessedTags(mapper, createTestMetric()) + + assertTagValue(t, "glob", "tag", tags) +} diff --git a/plugins/processors/execd/README.md b/plugins/processors/execd/README.md index 79c354bdd4dec..6f8d376a01171 100644 --- a/plugins/processors/execd/README.md +++ b/plugins/processors/execd/README.md @@ -23,7 +23,7 @@ Telegraf minimum version: Telegraf 1.15.0 ### Configuration: ```toml -[[processor.execd]] +[[processors.execd]] ## One program to run as daemon. ## NOTE: process and each argument should each be their own string ## eg: command = ["/path/to/your_program", "arg1", "arg2"] diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index 7aeb285a44fc5..992452561db29 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/process" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -93,7 +94,7 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { return nil } -func (e *Execd) Add(m telegraf.Metric, acc telegraf.Accumulator) error { +func (e *Execd) Add(m telegraf.Metric, _ telegraf.Accumulator) error { b, err := e.serializer.Serialize(m) if err != nil { return fmt.Errorf("metric serializing error: %w", err) @@ -117,6 +118,12 @@ func (e *Execd) Stop() error { } func (e *Execd) cmdReadOut(out io.Reader) { + // Prefer using the StreamParser when parsing influx format. + if _, isInfluxParser := e.parser.(*influx.Parser); isInfluxParser { + e.cmdReadOutStream(out) + return + } + scanner := bufio.NewScanner(out) scanBuf := make([]byte, 4096) scanner.Buffer(scanBuf, 262144) @@ -137,6 +144,33 @@ func (e *Execd) cmdReadOut(out io.Reader) { } } +func (e *Execd) cmdReadOutStream(out io.Reader) { + parser := influx.NewStreamParser(out) + + for { + metric, err := parser.Next() + + if err != nil { + // Stop parsing when we've reached the end. + if err == influx.EOF { + break + } + + if parseErr, isParseError := err.(*influx.ParseError); isParseError { + // Continue past parse errors. + e.acc.AddError(parseErr) + continue + } + + // Stop reading on any non-recoverable error. 
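+			// (Parse errors above are recoverable; any other error means the stream is broken, so give up.)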
+ e.acc.AddError(err) + return + } + + e.acc.AddMetric(metric) + } +} + func (e *Execd) cmdReadErr(out io.Reader) { scanner := bufio.NewScanner(out) diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index 451669ec6a130..c226725e1844e 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -34,7 +34,7 @@ func TestExternalProcessorWorks(t *testing.T) { orig := now metrics := []telegraf.Metric{} for i := 0; i < 10; i++ { - m, err := metric.New("test", + m := metric.New("test", map[string]string{ "city": "Toronto", }, @@ -43,7 +43,6 @@ func TestExternalProcessorWorks(t *testing.T) { "count": 1, }, now) - require.NoError(t, err) metrics = append(metrics, m) now = now.Add(1) @@ -79,6 +78,54 @@ func TestExternalProcessorWorks(t *testing.T) { } } +func TestParseLinesWithNewLines(t *testing.T) { + e := New() + e.Log = testutil.Logger{} + + exe, err := os.Executable() + require.NoError(t, err) + t.Log(exe) + e.Command = []string{exe, "-countmultiplier"} + e.RestartDelay = config.Duration(5 * time.Second) + + acc := &testutil.Accumulator{} + + require.NoError(t, e.Start(acc)) + + now := time.Now() + orig := now + + m := metric.New("test", + map[string]string{ + "author": "Mr. Gopher", + }, + map[string]interface{}{ + "phrase": "Gophers are amazing creatures.\nAbsolutely amazing.", + "count": 3, + }, + now) + + e.Add(m, acc) + + acc.Wait(1) + require.NoError(t, e.Stop()) + + processedMetric := acc.GetTelegrafMetrics()[0] + + expectedMetric := testutil.MustMetric("test", + map[string]string{ + "author": "Mr. Gopher", + }, + map[string]interface{}{ + "phrase": "Gophers are amazing creatures.\nAbsolutely amazing.", + "count": 6, + }, + orig, + ) + + testutil.RequireMetricEqual(t, expectedMetric, processedMetric) +} + var countmultiplier = flag.Bool("countmultiplier", false, "if true, act like line input program instead of test") diff --git a/plugins/processors/filepath/filepath.go b/plugins/processors/filepath/filepath.go index 70013de174a9a..26a0a7abdccf0 100644 --- a/plugins/processors/filepath/filepath.go +++ b/plugins/processors/filepath/filepath.go @@ -95,7 +95,6 @@ func (o *Options) applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metri if v, ok := v.(string); ok { metric.AddField(targetField, fn(v)) } - } } } diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go index a305c4c5c2f29..c6a3262921407 100644 --- a/plugins/processors/filepath/filepath_test.go +++ b/plugins/processors/filepath/filepath_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package filepath diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index a5666bf0030a8..10623c041dd2d 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -8,11 +8,10 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" + "github.com/influxdata/telegraf/plugins/common/parallel" si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/plugins/processors" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" ) var sampleConfig = ` @@ -100,8 +99,8 @@ type IfName struct { ifTable *si.Table `toml:"-"` ifXTable *si.Table `toml:"-"` - rwLock sync.RWMutex `toml:"-"` - cache *TTLCache `toml:"-"` + lock sync.Mutex `toml:"-"` + cache *TTLCache `toml:"-"` parallel 
parallel.Parallel `toml:"-"` acc telegraf.Accumulator `toml:"-"` @@ -143,13 +142,13 @@ func (d *IfName) addTag(metric telegraf.Metric) error { return nil } - num_s, ok := metric.GetTag(d.SourceTag) + numS, ok := metric.GetTag(d.SourceTag) if !ok { d.Log.Warn("Source tag missing.") return nil } - num, err := strconv.ParseUint(num_s, 10, 64) + num, err := strconv.ParseUint(numS, 10, 64) if err != nil { return fmt.Errorf("couldn't parse source tag as uint") } @@ -187,9 +186,9 @@ func (d *IfName) addTag(metric telegraf.Metric) error { } func (d *IfName) invalidate(agent string) { - d.rwLock.RLock() + d.lock.Lock() d.cache.Delete(agent) - d.rwLock.RUnlock() + d.lock.Unlock() } func (d *IfName) Start(acc telegraf.Accumulator) error { @@ -201,13 +200,13 @@ func (d *IfName) Start(acc telegraf.Accumulator) error { return fmt.Errorf("parsing SNMP client config: %w", err) } - d.ifTable, err = d.makeTable("IF-MIB::ifTable") + d.ifTable, err = d.makeTable("IF-MIB::ifDescr") if err != nil { - return fmt.Errorf("looking up ifTable in local MIB: %w", err) + return fmt.Errorf("looking up ifDescr in local MIB: %w", err) } - d.ifXTable, err = d.makeTable("IF-MIB::ifXTable") + d.ifXTable, err = d.makeTable("IF-MIB::ifName") if err != nil { - return fmt.Errorf("looking up ifXTable in local MIB: %w", err) + return fmt.Errorf("looking up ifName in local MIB: %w", err) } fn := func(m telegraf.Metric) []telegraf.Metric { @@ -226,7 +225,7 @@ func (d *IfName) Start(acc telegraf.Accumulator) error { return nil } -func (d *IfName) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { +func (d *IfName) Add(metric telegraf.Metric, _ telegraf.Accumulator) error { d.parallel.Enqueue(metric) return nil } @@ -241,61 +240,61 @@ func (d *IfName) Stop() error { func (d *IfName) getMap(agent string) (entry nameMap, age time.Duration, err error) { var sig chan struct{} + d.lock.Lock() + // Check cache - d.rwLock.RLock() m, ok, age := d.cache.Get(agent) - d.rwLock.RUnlock() if ok { + d.lock.Unlock() return m, age, nil } - // Is this the first request for this agent? - d.rwLock.Lock() + // cache miss. Is this the first request for this agent? sig, found := d.sigs[agent] if !found { + // This is the first request. Make signal for subsequent requests to wait on s := make(chan struct{}) d.sigs[agent] = s sig = s } - d.rwLock.Unlock() + + d.lock.Unlock() if found { // This is not the first request. Wait for first to finish. <-sig + // Check cache again - d.rwLock.RLock() + d.lock.Lock() m, ok, age := d.cache.Get(agent) - d.rwLock.RUnlock() + d.lock.Unlock() if ok { return m, age, nil - } else { - return nil, 0, fmt.Errorf("getting remote table from cache") } + return nil, 0, fmt.Errorf("getting remote table from cache") } // The cache missed and this is the first request for this - // agent. - - // Make the SNMP request + // agent. Make the SNMP request m, err = d.getMapRemote(agent) + + d.lock.Lock() if err != nil { - //failure. signal without saving to cache - d.rwLock.Lock() + //snmp failure. signal without saving to cache close(sig) delete(d.sigs, agent) - d.rwLock.Unlock() + d.lock.Unlock() return nil, 0, fmt.Errorf("getting remote table: %w", err) } - // Cache it, then signal any other waiting requests for this agent - // and clean up - d.rwLock.Lock() + // snmp success. 
Cache response, then signal any other waiting + // requests for this agent and clean up d.cache.Put(agent, m) close(sig) delete(d.sigs, agent) - d.rwLock.Unlock() + d.lock.Unlock() return m, 0, nil } @@ -338,7 +337,7 @@ func init() { ClientConfig: snmp.ClientConfig{ Retries: 3, MaxRepetitions: 10, - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), Version: 2, Community: "public", }, @@ -347,11 +346,14 @@ func init() { }) } -func makeTableNoMock(tableName string) (*si.Table, error) { +func makeTableNoMock(fieldName string) (*si.Table, error) { var err error tab := si.Table{ - Oid: tableName, + Name: "ifTable", IndexAsTag: true, + Fields: []si.Field{ + {Oid: fieldName}, + }, } err = tab.Init() @@ -378,21 +380,21 @@ func buildMap(gs snmp.GosnmpWrapper, tab *si.Table, column string) (nameMap, err t := make(nameMap) for _, v := range rtab.Rows { - i_str, ok := v.Tags["index"] + iStr, ok := v.Tags["index"] if !ok { //should always have an index tag because the table should //always have IndexAsTag true return nil, fmt.Errorf("no index tag") } - i, err := strconv.ParseUint(i_str, 10, 64) + i, err := strconv.ParseUint(iStr, 10, 64) if err != nil { return nil, fmt.Errorf("index tag isn't a uint") } - name_if, ok := v.Fields[column] + nameIf, ok := v.Fields[column] if !ok { return nil, fmt.Errorf("field %s is missing", column) } - name, ok := name_if.(string) + name, ok := nameIf.(string) if !ok { return nil, fmt.Errorf("field %s isn't a string", column) } diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go index 85ddc767411c0..4052818f7509b 100644 --- a/plugins/processors/ifname/ifname_test.go +++ b/plugins/processors/ifname/ifname_test.go @@ -6,18 +6,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestTable(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping test due to connect failures") d := IfName{} d.Init() @@ -26,7 +24,7 @@ func TestTable(t *testing.T) { config := snmp.ClientConfig{ Version: 2, - Timeout: internal.Duration{Duration: 5 * time.Second}, // Doesn't work with 0 timeout + Timeout: config.Duration(5 * time.Second), // Doesn't work with 0 timeout } gs, err := snmp.NewWrapper(config) require.NoError(t, err) @@ -42,10 +40,9 @@ func TestTable(t *testing.T) { require.NotEmpty(t, m) } -func TestIfName(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestIfNameIntegration(t *testing.T) { + t.Skip("Skipping test due to connect failures") + d := IfName{ SourceTag: "ifIndex", DestTag: "ifName", @@ -53,7 +50,7 @@ func TestIfName(t *testing.T) { CacheSize: 1000, ClientConfig: snmp.ClientConfig{ Version: 2, - Timeout: internal.Duration{Duration: 5 * time.Second}, // Doesn't work with 0 timeout + Timeout: config.Duration(5 * time.Second), // Doesn't work with 0 timeout }, } err := d.Init() diff --git a/plugins/processors/ifname/ttl_cache.go b/plugins/processors/ifname/ttl_cache.go index 8f9c4ae653499..e65a8ec7b182e 100644 --- a/plugins/processors/ifname/ttl_cache.go +++ b/plugins/processors/ifname/ttl_cache.go @@ -1,6 +1,7 @@ package ifname import ( + "runtime" "time" ) @@ -30,13 +31,22 @@ func 
(c *TTLCache) Get(key keyType) (valType, bool, time.Duration) { if !ok { return valType{}, false, 0 } + + if runtime.GOOS == "windows" { + // Sometimes on Windows `c.now().Sub(v.time) == 0` due to clock resolution issues: + // https://github.com/golang/go/issues/17696 + // https://github.com/golang/go/issues/29485 + // Force clock to refresh: + time.Sleep(time.Nanosecond) + } + age := c.now().Sub(v.time) if age < c.validDuration { return v.val, ok, age - } else { - c.lru.Delete(key) - return valType{}, false, 0 } + + c.lru.Delete(key) + return valType{}, false, 0 } func (c *TTLCache) Put(key keyType, value valType) { diff --git a/plugins/processors/override/override_test.go b/plugins/processors/override/override_test.go index 433751af96255..5e3c118e8f268 100644 --- a/plugins/processors/override/override_test.go +++ b/plugins/processors/override/override_test.go @@ -10,12 +10,12 @@ import ( ) func createTestMetric() telegraf.Metric { - metric, _ := metric.New("m1", + m := metric.New("m1", map[string]string{"metric_tag": "from_metric"}, map[string]interface{}{"value": int64(1)}, time.Now(), ) - return metric + return m } func calculateProcessedTags(processor Override, metric telegraf.Metric) map[string]string { diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go index 63230763ab02b..a7f5b47a1597c 100644 --- a/plugins/processors/parser/parser.go +++ b/plugins/processors/parser/parser.go @@ -1,19 +1,19 @@ package parser import ( - "log" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/processors" ) type Parser struct { parsers.Config - DropOriginal bool `toml:"drop_original"` - Merge string `toml:"merge"` - ParseFields []string `toml:"parse_fields"` - Parser parsers.Parser + DropOriginal bool `toml:"drop_original"` + Merge string `toml:"merge"` + ParseFields []string `toml:"parse_fields"` + Log telegraf.Logger `toml:"-"` + parser parsers.Parser } var SampleConfig = ` @@ -43,13 +43,14 @@ func (p *Parser) Description() string { } func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { - if p.Parser == nil { + if p.parser == nil { var err error - p.Parser, err = parsers.NewParser(&p.Config) + p.parser, err = parsers.NewParser(&p.Config) if err != nil { - log.Printf("E! [processors.parser] could not create parser: %v", err) + p.Log.Errorf("could not create parser: %v", err) return metrics } + models.SetLoggerOnPlugin(p.parser, p.Log) } results := []telegraf.Metric{} @@ -67,7 +68,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { case string: fromFieldMetric, err := p.parseField(value) if err != nil { - log.Printf("E! [processors.parser] could not parse field %s: %v", key, err) + p.Log.Errorf("could not parse field %s: %v", key, err) } for _, m := range fromFieldMetric { @@ -81,7 +82,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // prior to returning. newMetrics = append(newMetrics, fromFieldMetric...) default: - log.Printf("E! 
[processors.parser] field '%s' not a string, skipping", key) + p.Log.Errorf("field '%s' not a string, skipping", key) } } } @@ -114,7 +115,7 @@ func merge(base telegraf.Metric, metrics []telegraf.Metric) telegraf.Metric { } func (p *Parser) parseField(value string) ([]telegraf.Metric, error) { - return p.Parser.Parse([]byte(value)) + return p.parser.Parse([]byte(value)) } func init() { diff --git a/plugins/processors/parser/parser_test.go b/plugins/processors/parser/parser_test.go index ac042848f67ec..dedf15bf71506 100644 --- a/plugins/processors/parser/parser_test.go +++ b/plugins/processors/parser/parser_test.go @@ -7,25 +7,20 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) //compares metrics without comparing time func compareMetrics(t *testing.T, expected, actual []telegraf.Metric) { - assert.Equal(t, len(expected), len(actual)) - for i, metric := range actual { - require.Equal(t, expected[i].Name(), metric.Name()) - require.Equal(t, expected[i].Fields(), metric.Fields()) - require.Equal(t, expected[i].Tags(), metric.Tags()) - } -} - -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) + require.Equal(t, len(expected), len(actual)) + for i, m := range actual { + require.Equal(t, expected[i].Name(), m.Name()) + require.Equal(t, expected[i].Fields(), m.Fields()) + require.Equal(t, expected[i].Tags(), m.Tags()) } - return v } func TestApply(t *testing.T) { @@ -51,18 +46,17 @@ func TestApply(t *testing.T) { "method", }, }, - input: Metric( - metric.New( - "singleField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "singleField", map[string]string{ "ts": "2018-07-24T19:43:40.275Z", @@ -71,7 +65,7 @@ func TestApply(t *testing.T) { "method": "POST", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -88,18 +82,17 @@ func TestApply(t *testing.T) { "method", }, }, - input: Metric( - metric.New( - "singleField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "singleField", map[string]string{ "some": "tag", @@ -111,7 +104,7 @@ func TestApply(t *testing.T) { map[string]interface{}{ "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -127,18 +120,17 @@ func TestApply(t *testing.T) { "method", }, }, - input: Metric( - metric.New( - "singleField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "sample": 
`{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "singleField", map[string]string{ "some": "tag", @@ -146,8 +138,8 @@ func TestApply(t *testing.T) { map[string]interface{}{ "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "singleField", map[string]string{ "ts": "2018-07-24T19:43:40.275Z", @@ -156,7 +148,7 @@ func TestApply(t *testing.T) { "method": "POST", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -166,23 +158,22 @@ func TestApply(t *testing.T) { DataFormat: "influx", }, dropOriginal: false, - input: Metric( - metric.New( - "influxField", - map[string]string{}, - map[string]interface{}{ - "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", - }, - time.Unix(0, 0))), + input: metric.New( + "influxField", + map[string]string{}, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "influxField", map[string]string{}, map[string]interface{}{ "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "deal", map[string]string{ "computer_name": "hosta", @@ -190,7 +181,7 @@ func TestApply(t *testing.T) { map[string]interface{}{ "message": "stuff", }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -201,18 +192,17 @@ func TestApply(t *testing.T) { config: parsers.Config{ DataFormat: "influx", }, - input: Metric( - metric.New( - "influxField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", - }, - time.Unix(0, 0))), + input: metric.New( + "influxField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "deal", map[string]string{ "computer_name": "hosta", @@ -221,7 +211,7 @@ func TestApply(t *testing.T) { map[string]interface{}{ "message": "stuff", }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -232,16 +222,15 @@ func TestApply(t *testing.T) { DataFormat: "grok", GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, }, - input: Metric( - metric.New( - "success", - map[string]string{}, - map[string]interface{}{ - "grokSample": "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "grokSample": "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + 
metric.New( "success", map[string]string{ "resp_code": "200", @@ -257,7 +246,7 @@ func TestApply(t *testing.T) { "ident": "-", "http_version": float64(1.1), }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -268,30 +257,29 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "err"}, }, - input: Metric( - metric.New( - "bigMeasure", - map[string]string{}, - map[string]interface{}{ - "field_1": `{"lvl":"info","msg":"http request"}`, - "field_2": `{"err":"fatal","fatal":"security threat"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bigMeasure", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "bigMeasure", map[string]string{ "err": "fatal", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -303,17 +291,16 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "msg", "err", "fatal"}, }, - input: Metric( - metric.New( - "bigMeasure", - map[string]string{}, - map[string]interface{}{ - "field_1": `{"lvl":"info","msg":"http request"}`, - "field_2": `{"err":"fatal","fatal":"security threat"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bigMeasure", map[string]string{ "lvl": "info", @@ -325,7 +312,7 @@ func TestApply(t *testing.T) { "field_1": `{"lvl":"info","msg":"http request"}`, "field_2": `{"err":"fatal","fatal":"security threat"}`, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -336,40 +323,39 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "msg", "err", "fatal"}, }, - input: Metric( - metric.New( - "bigMeasure", - map[string]string{}, - map[string]interface{}{ - "field_1": `{"lvl":"info","msg":"http request"}`, - "field_2": `{"err":"fatal","fatal":"security threat"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bigMeasure", map[string]string{}, map[string]interface{}{ "field_1": `{"lvl":"info","msg":"http request"}`, "field_2": `{"err":"fatal","fatal":"security threat"}`, }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "bigMeasure", map[string]string{ "lvl": "info", "msg": "http request", }, map[string]interface{}{}, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "bigMeasure", map[string]string{ "err": "fatal", "fatal": "security threat", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -380,31 +366,30 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl"}, }, - input: Metric( - metric.New( - "success", - map[string]string{}, - map[string]interface{}{ - "good": `{"lvl":"info"}`, - "bad": "why", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{}, + 
map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{}, map[string]interface{}{ "good": `{"lvl":"info"}`, "bad": "why", }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "success", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -415,18 +400,17 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "thing"}, }, - input: Metric( - metric.New( - "success", - map[string]string{}, - map[string]interface{}{ - "bad": "why", - "good": `{"lvl":"info"}`, - "ok": `{"thing":"thang"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "bad": "why", + "good": `{"lvl":"info"}`, + "ok": `{"thing":"thang"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{}, map[string]interface{}{ @@ -434,21 +418,21 @@ func TestApply(t *testing.T) { "good": `{"lvl":"info"}`, "ok": `{"thing":"thang"}`, }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "success", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "success", map[string]string{ "thing": "thang", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -460,19 +444,18 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl"}, }, - input: Metric( - metric.New( - "success", - map[string]string{ - "a": "tag", - }, - map[string]interface{}{ - "good": `{"lvl":"info"}`, - "bad": "why", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{ + "a": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{ "a": "tag", @@ -482,7 +465,7 @@ func TestApply(t *testing.T) { "good": `{"lvl":"info"}`, "bad": "why", }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -493,25 +476,24 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl"}, }, - input: Metric( - metric.New( - "success", - map[string]string{ - "thing": "tag", - }, - map[string]interface{}{ - "good": `{"lvl":"info"}`, - "bad": "why", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{ + "thing": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, } @@ -523,6 +505,7 @@ func TestApply(t *testing.T) { ParseFields: tt.parseFields, DropOriginal: tt.dropOriginal, Merge: tt.merge, + Log: testutil.Logger{Name: "processor.parser"}, } output := parser.Apply(tt.input) @@ -546,22 +529,21 @@ func TestBadApply(t *testing.T) { config: parsers.Config{ DataFormat: "json", }, - input: Metric( - metric.New( - "bad", - map[string]string{}, - map[string]interface{}{ - "some_field": 5, - }, - time.Unix(0, 0))), + input: metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bad", map[string]string{}, map[string]interface{}{ 
"some_field": 5, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -570,22 +552,21 @@ func TestBadApply(t *testing.T) { config: parsers.Config{ DataFormat: "json", }, - input: Metric( - metric.New( - "bad", - map[string]string{}, - map[string]interface{}{ - "some_field": 5, - }, - time.Unix(0, 0))), + input: metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bad", map[string]string{}, map[string]interface{}{ "some_field": 5, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, } @@ -595,6 +576,7 @@ func TestBadApply(t *testing.T) { parser := Parser{ Config: tt.config, ParseFields: tt.parseFields, + Log: testutil.Logger{Name: "processor.parser"}, } output := parser.Apply(tt.input) @@ -606,17 +588,17 @@ func TestBadApply(t *testing.T) { // Benchmarks -func getMetricFields(metric telegraf.Metric) interface{} { +func getMetricFields(m telegraf.Metric) interface{} { key := "field3" - if value, ok := metric.Fields()[key]; ok { + if value, ok := m.Fields()[key]; ok { return value } return nil } -func getMetricFieldList(metric telegraf.Metric) interface{} { +func getMetricFieldList(m telegraf.Metric) interface{} { key := "field3" - fields := metric.FieldList() + fields := m.FieldList() for _, field := range fields { if field.Key == key { return field.Value @@ -626,7 +608,7 @@ func getMetricFieldList(metric telegraf.Metric) interface{} { } func BenchmarkFieldListing(b *testing.B) { - metric := Metric(metric.New( + m := metric.New( "test", map[string]string{ "some": "tag", @@ -640,15 +622,15 @@ func BenchmarkFieldListing(b *testing.B) { "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))) + time.Unix(0, 0)) for n := 0; n < b.N; n++ { - getMetricFieldList(metric) + getMetricFieldList(m) } } func BenchmarkFields(b *testing.B) { - metric := Metric(metric.New( + m := metric.New( "test", map[string]string{ "some": "tag", @@ -662,9 +644,9 @@ func BenchmarkFields(b *testing.B) { "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))) + time.Unix(0, 0)) for n := 0; n < b.N; n++ { - getMetricFields(metric) + getMetricFields(m) } } diff --git a/plugins/processors/port_name/README.md b/plugins/processors/port_name/README.md index ad4e52d6bc187..3629aff84e90a 100644 --- a/plugins/processors/port_name/README.md +++ b/plugins/processors/port_name/README.md @@ -1,8 +1,10 @@ # Port Name Lookup Processor Plugin -Use the `port_name` processor to convert a tag containing a well-known port number to the registered service name. +Use the `port_name` processor to convert a tag or field containing a well-known port number to the registered service name. -Tag can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting. +Tag or field can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting. An additional tag or field can be specified for the protocol. + +If the source was found in tag, the service name will be added as a tag. 
If the source was found in a field, the service name will also be a field. Telegraf minimum version: Telegraf 1.15.0 @@ -12,12 +14,20 @@ Telegraf minimum version: Telegraf 1.15.0 [[processors.port_name]] ## Name of tag holding the port number # tag = "port" + ## Or name of the field holding the port number + # field = "port" - ## Name of output tag where service name will be added + ## Name of output tag or field (depending on the source) where service name will be added # dest = "service" ## Default tcp or udp # default_protocol = "tcp" + + ## Tag containing the protocol (tcp or udp, case-insensitive) + # protocol_tag = "proto" + + ## Field containing the protocol (tcp or udp, case-insensitive) + # protocol_field = "proto" ``` ### Example diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go index 50c893e60d6dc..60817dbdd244c 100644 --- a/plugins/processors/port_name/port_name.go +++ b/plugins/processors/port_name/port_name.go @@ -15,12 +15,20 @@ var sampleConfig = ` [[processors.port_name]] ## Name of tag holding the port number # tag = "port" + ## Or name of the field holding the port number + # field = "port" - ## Name of output tag where service name will be added + ## Name of output tag or field (depending on the source) where service name will be added # dest = "service" ## Default tcp or udp # default_protocol = "tcp" + + ## Tag containing the protocol (tcp or udp, case-insensitive) + # protocol_tag = "proto" + + ## Field containing the protocol (tcp or udp, case-insensitive) + # protocol_field = "proto" ` type sMap map[string]map[int]string // "https" == services["tcp"][443] @@ -29,18 +37,21 @@ var services sMap type PortName struct { SourceTag string `toml:"tag"` - DestTag string `toml:"dest"` + SourceField string `toml:"field"` + Dest string `toml:"dest"` DefaultProtocol string `toml:"default_protocol"` + ProtocolTag string `toml:"protocol_tag"` + ProtocolField string `toml:"protocol_field"` Log telegraf.Logger `toml:"-"` } -func (d *PortName) SampleConfig() string { +func (pn *PortName) SampleConfig() string { return sampleConfig } -func (d *PortName) Description() string { - return "Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file" +func (pn *PortName) Description() string { + return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file" } func readServicesFile() { @@ -95,19 +106,43 @@ func readServices(r io.Reader) sMap { return services } -func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { +func (pn *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { for _, m := range metrics { - portProto, ok := m.GetTag(d.SourceTag) - if !ok { - // Nonexistent tag + var portProto string + var fromField bool + + if len(pn.SourceTag) > 0 { + if tag, ok := m.GetTag(pn.SourceTag); ok { + portProto = tag + } + } + if len(pn.SourceField) > 0 { + if field, ok := m.GetField(pn.SourceField); ok { + switch v := field.(type) { + default: + pn.Log.Errorf("Unexpected type %t in source field; must be string or int", v) + continue + case int64: + portProto = strconv.FormatInt(v, 10) + case uint64: + portProto = strconv.FormatUint(v, 10) + case string: + portProto = v + } + fromField = true + } + } + + if len(portProto) == 0 { continue } + portProtoSlice := strings.SplitN(portProto, "/", 2) l := len(portProtoSlice) if l == 0 { // Empty tag - d.Log.Errorf("empty port tag: %v", d.SourceTag) + 
pn.Log.Errorf("empty port tag: %v", pn.SourceTag) continue } @@ -118,15 +153,32 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { port, err = strconv.Atoi(val) if err != nil { // Can't convert port to string - d.Log.Errorf("error converting port to integer: %v", val) + pn.Log.Errorf("error converting port to integer: %v", val) continue } } - proto := d.DefaultProtocol + proto := pn.DefaultProtocol if l > 1 && len(portProtoSlice[1]) > 0 { proto = portProtoSlice[1] } + if len(pn.ProtocolTag) > 0 { + if tag, ok := m.GetTag(pn.ProtocolTag); ok { + proto = tag + } + } + if len(pn.ProtocolField) > 0 { + if field, ok := m.GetField(pn.ProtocolField); ok { + switch v := field.(type) { + default: + pn.Log.Errorf("Unexpected type %t in protocol field; must be string", v) + continue + case string: + proto = v + } + } + } + proto = strings.ToLower(proto) protoMap, ok := services[proto] @@ -137,7 +189,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // normally has entries for both, so our map does too. If // not, it's very likely the source tag or the services // file doesn't make sense. - d.Log.Errorf("protocol not found in services map: %v", proto) + pn.Log.Errorf("protocol not found in services map: %v", proto) continue } @@ -147,17 +199,21 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // // Not all ports are named so this isn't an error, but // it's helpful to know when debugging. - d.Log.Debugf("port not found in services map: %v", port) + pn.Log.Debugf("port not found in services map: %v", port) continue } - m.AddTag(d.DestTag, service) + if fromField { + m.AddField(pn.Dest, service) + } else { + m.AddTag(pn.Dest, service) + } } return metrics } -func (h *PortName) Init() error { +func (pn *PortName) Init() error { services = make(sMap) readServicesFile() return nil @@ -167,8 +223,11 @@ func init() { processors.Add("port_name", func() telegraf.Processor { return &PortName{ SourceTag: "port", - DestTag: "service", + SourceField: "port", + Dest: "service", DefaultProtocol: "tcp", + ProtocolTag: "proto", + ProtocolField: "proto", } }) } diff --git a/plugins/processors/port_name/port_name_test.go b/plugins/processors/port_name/port_name_test.go index b58f95a9eb75a..46839b2bea80b 100644 --- a/plugins/processors/port_name/port_name_test.go +++ b/plugins/processors/port_name/port_name_test.go @@ -28,12 +28,15 @@ func TestFakeServices(t *testing.T) { func TestTable(t *testing.T) { var tests = []struct { - name string - tag string - dest string - prot string - input []telegraf.Metric - expected []telegraf.Metric + name string + tag string + field string + dest string + prot string + protField string + protTag string + input []telegraf.Metric + expected []telegraf.Metric }{ { name: "ordinary tcp default", @@ -239,6 +242,93 @@ func TestTable(t *testing.T) { ), }, }, + { + name: "read from field instead of tag", + field: "foo", + dest: "bar", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + "bar": "http", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "read proto from field", + field: "foo", + dest: "bar", + prot: "udp", + protField: "proto", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + 
"proto": "tcp", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + "bar": "http", + "proto": "tcp", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "read proto from tag", + tag: "foo", + dest: "bar", + prot: "udp", + protTag: "proto", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + "proto": "tcp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + "bar": "http", + "proto": "tcp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, } r := strings.NewReader(fakeServices) @@ -248,8 +338,11 @@ func TestTable(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := PortName{ SourceTag: tt.tag, - DestTag: tt.dest, + SourceField: tt.field, + Dest: tt.dest, DefaultProtocol: tt.prot, + ProtocolField: tt.protField, + ProtocolTag: tt.protTag, Log: testutil.Logger{}, } diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go index c8cf73d14157c..3b9a4ce579c9a 100644 --- a/plugins/processors/port_name/services_path.go +++ b/plugins/processors/port_name/services_path.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package portname diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go index 5097bfa9c6140..5fd30eb59671d 100644 --- a/plugins/processors/port_name/services_path_notwindows.go +++ b/plugins/processors/port_name/services_path_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package portname diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go index b0ddf47d08a7b..2f8890bba7e9e 100644 --- a/plugins/processors/regex/regex_test.go +++ b/plugins/processors/regex/regex_test.go @@ -10,7 +10,7 @@ import ( ) func newM1() telegraf.Metric { - m1, _ := metric.New("access_log", + m1 := metric.New("access_log", map[string]string{ "verb": "GET", "resp_code": "200", @@ -24,7 +24,7 @@ func newM1() telegraf.Metric { } func newM2() telegraf.Metric { - m2, _ := metric.New("access_log", + m2 := metric.New("access_log", map[string]string{ "verb": "GET", "resp_code": "200", diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go index 1f8e0b7db3a4a..36e8aaeed43a0 100644 --- a/plugins/processors/rename/rename_test.go +++ b/plugins/processors/rename/rename_test.go @@ -16,7 +16,7 @@ func newMetric(name string, tags map[string]string, fields map[string]interface{ if fields == nil { fields = map[string]interface{}{} } - m, _ := metric.New(name, tags, fields, time.Now()) + m := metric.New(name, tags, fields, time.Now()) return m } diff --git a/plugins/processors/reverse_dns/rdnscache.go b/plugins/processors/reverse_dns/rdnscache.go index 1d86b5385d218..cc9574552dae8 100644 --- a/plugins/processors/reverse_dns/rdnscache.go +++ b/plugins/processors/reverse_dns/rdnscache.go @@ -111,7 +111,7 @@ func (d *ReverseDNSCache) lookup(ip string) ([]string, error) { // check if the value is cached d.rwLock.RLock() result, found := d.lockedGetFromCache(ip) - if found && result.completed && result.expiresAt.After(time.Now()) { + if found && result.completed && !result.expiresAt.Before(time.Now()) { defer d.rwLock.RUnlock() atomic.AddUint64(&d.stats.CacheHit, 1) // cache is valid @@ -176,7 +176,7 @@ func (d 
*ReverseDNSCache) subscribeTo(ip string) callbackChannelType { // the dnslookup that is returned until you clone it. func (d *ReverseDNSCache) lockedGetFromCache(ip string) (lookup *dnslookup, found bool) { lookup, found = d.cache[ip] - if found && lookup.expiresAt.Before(time.Now()) { + if found && !lookup.expiresAt.After(time.Now()) { return nil, false } return lookup, found @@ -185,7 +185,7 @@ func (d *ReverseDNSCache) lockedGetFromCache(ip string) (lookup *dnslookup, foun // lockedSaveToCache stores a lookup in the correct internal ip cache. // you MUST first do a write lock before calling it. func (d *ReverseDNSCache) lockedSaveToCache(lookup *dnslookup) { - if lookup.expiresAt.Before(time.Now()) { + if !lookup.expiresAt.After(time.Now()) { return // don't cache. } d.cache[lookup.ip] = lookup @@ -277,7 +277,7 @@ func (d *ReverseDNSCache) cleanup() { } ipsToDelete := []string{} for i := 0; i < len(d.expireList); i++ { - if d.expireList[i].expiresAt.After(now) { + if !d.expireList[i].expiresAt.Before(now) { break // done. Nothing after this point is expired. } ipsToDelete = append(ipsToDelete, d.expireList[i].ip) diff --git a/plugins/processors/reverse_dns/rdnscache_test.go b/plugins/processors/reverse_dns/rdnscache_test.go index e8466c27fd315..97cc8abdbdff8 100644 --- a/plugins/processors/reverse_dns/rdnscache_test.go +++ b/plugins/processors/reverse_dns/rdnscache_test.go @@ -125,12 +125,12 @@ func TestLookupTimeout(t *testing.T) { type timeoutResolver struct{} -func (r *timeoutResolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) { +func (r *timeoutResolver) LookupAddr(_ context.Context, _ string) (names []string, err error) { return nil, errors.New("timeout") } type localResolver struct{} -func (r *localResolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) { +func (r *localResolver) LookupAddr(_ context.Context, _ string) (names []string, err error) { return []string{"localhost"}, nil } diff --git a/plugins/processors/reverse_dns/reversedns.go b/plugins/processors/reverse_dns/reversedns.go index bef79a01c92eb..966748420bc8d 100644 --- a/plugins/processors/reverse_dns/reversedns.go +++ b/plugins/processors/reverse_dns/reversedns.go @@ -5,8 +5,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/parallel" "github.com/influxdata/telegraf/plugins/processors" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" ) const sampleConfig = ` @@ -104,7 +104,7 @@ func (r *ReverseDNS) Stop() error { return nil } -func (r *ReverseDNS) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { +func (r *ReverseDNS) Add(metric telegraf.Metric, _ telegraf.Accumulator) error { r.parallel.Enqueue(metric) return nil } diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go index 499dffb77e08b..5fcce5fb4725a 100644 --- a/plugins/processors/reverse_dns/reversedns_test.go +++ b/plugins/processors/reverse_dns/reversedns_test.go @@ -1,18 +1,20 @@ package reverse_dns import ( + "runtime" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSimpleReverseLookup(t *testing.T) { now := time.Now() - m, _ := metric.New("name", map[string]string{ + m := metric.New("name", map[string]string{ "dest_ip": "8.8.8.8", }, 
map[string]interface{}{ "source_ip": "127.0.0.1", @@ -40,7 +42,10 @@ func TestSimpleReverseLookup(t *testing.T) { processedMetric := acc.GetTelegrafMetrics()[0] f, ok := processedMetric.GetField("source_name") require.True(t, ok) - require.EqualValues(t, "localhost", f) + if runtime.GOOS != "windows" { + // lookupAddr on Windows works differently than on Linux so `source_name` won't be "localhost" on every environment + require.EqualValues(t, "localhost", f) + } tag, ok := processedMetric.GetTag("dest_name") require.True(t, ok) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 1b541c33857ed..9ca231c5aeb8b 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -30,6 +30,13 @@ def apply(metric): ## File containing a Starlark script. # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [processors.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true ``` ### Usage @@ -95,12 +102,21 @@ While Starlark is similar to Python, there are important differences to note: The ability to load external scripts other than your own is pretty limited. The following libraries are available for loading: -* json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. +* json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/json). +* log: `load("logging.star", "log")` provides the following functions: `log.debug()`, `log.info()`, `log.warn()`, `log.error()`. See [logging.star](/plugins/processors/starlark/testdata/logging.star) for an example. +* math: `load("math.star", "math")` provides [the following functions and constants](https://pkg.go.dev/go.starlark.net/lib/math). See [math.star](/plugins/processors/starlark/testdata/math.star) for an example. +* time: `load("time.star", "time")` provides the following functions: `time.from_timestamp()`, `time.is_valid_timezone()`, `time.now()`, `time.parse_duration()`, `time.parseTime()`, `time.time()`. See [time_date.star](/plugins/processors/starlark/testdata/time_date.star), [time_duration.star](/plugins/processors/starlark/testdata/time_duration.star) and/or [time_timestamp.star](/plugins/processors/starlark/testdata/time_timestamp.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/time). If you would like to see support for something else here, please open an issue. ### Common Questions +**What's the performance cost to using Starlark?** + +In local tests, it takes about 1µs (1 microsecond) to run a modest script to process one +metric. This is going to vary with the size of your script, but the total impact is minimal. +At this pace, it's likely not going to be the bottleneck in your Telegraf setup. + **How can I drop/delete a metric?** If you don't return the metric it will be deleted. 
Usually this means the
@@ -151,19 +167,81 @@ def apply(metric):
 
 **How can I save values across multiple calls to the script?**
 
-Telegraf freezes the global scope, which prevents it from being modified.
-Attempting to modify the global scope will fail with an error.
+Telegraf freezes the global scope, which prevents it from being modified, except for a special shared global dictionary
+named `state`, which can be used by the `apply` function.
+See an example of this in [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star).
+
+Other than the `state` variable, attempting to modify the global scope will fail with an error.
+
+**How to manage errors that occur in the apply function?**
+
+In case you need to call some code that may return an error, you can delegate the call
+to the built-in function `catch`, which takes a `Callable` as argument and returns the error
+that occurred if any, `None` otherwise.
+
+So for example:
+
+```python
+load("json.star", "json")
+
+def apply(metric):
+    error = catch(lambda: failing(metric))
+    if error != None:
+        # Some code to execute in case of an error
+        metric.fields["error"] = error
+    return metric
+
+def failing(metric):
+    json.decode("non-json-content")
+```
+**How to reuse the same script but with different parameters?**
+
+In case you have a generic script that you would like to reuse for different instances of the plugin, you can use constants as input parameters of your script.
+So for example, assuming that you have the following configuration:
+
+```toml
+[[processors.starlark]]
+  script = "/usr/local/bin/myscript.star"
+
+  [processors.starlark.constants]
+    somecustomnum = 10
+    somecustomstr = "mycustomfield"
+```
+
+Your script could then use the constants defined in the configuration as follows:
+
+```python
+def apply(metric):
+    if metric.fields[somecustomstr] >= somecustomnum:
+        metric.fields.clear()
+    return metric
+```
 
 ### Examples
 
+- [drop string fields](/plugins/processors/starlark/testdata/drop_string_fields.star) - Drop fields containing string values.
+- [drop fields with unexpected type](/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star) - Drop fields containing unexpected value types.
+- [iops](/plugins/processors/starlark/testdata/iops.star) - Obtain IOPS (to aggregate, to produce max_iops)
 - [json](/plugins/processors/starlark/testdata/json.star) - an example of processing JSON from a field in a metric
+- [math](/plugins/processors/starlark/testdata/math.star) - Use a math function to compute the value of a field. [The list of the supported math functions and constants](https://pkg.go.dev/go.starlark.net/lib/math).
 - [number logic](/plugins/processors/starlark/testdata/number_logic.star) - transform a numerical value to another numerical value
 - [pivot](/plugins/processors/starlark/testdata/pivot.star) - Pivots a key's value to be the key for another key.
 - [ratio](/plugins/processors/starlark/testdata/ratio.star) - Compute the ratio of two integer fields
 - [rename](/plugins/processors/starlark/testdata/rename.star) - Rename tags or fields using a name mapping.
 - [scale](/plugins/processors/starlark/testdata/scale.star) - Multiply any field by a number
-- [value filter](/plugins/processors/starlark/testdata/value_filter.star) - remove a metric based on a field value.
+- [time date](/plugins/processors/starlark/testdata/time_date.star) - Parse a date and extract the year, month and day from it. 
+- [time duration](/plugins/processors/starlark/testdata/time_duration.star) - Parse a duration and convert it into a total amount of seconds. +- [time timestamp](/plugins/processors/starlark/testdata/time_timestamp.star) - Filter metrics based on the timestamp in seconds. +- [time timestamp nanoseconds](/plugins/processors/starlark/testdata/time_timestamp_nanos.star) - Filter metrics based on the timestamp with nanoseconds. +- [time timestamp current](/plugins/processors/starlark/testdata/time_set_timestamp.star) - Setting the metric timestamp to the current/local time. +- [value filter](/plugins/processors/starlark/testdata/value_filter.star) - Remove a metric based on a field value. +- [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf +- [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. +- [multiple metrics from json array](/plugins/processors/starlark/testdata/multiple_metrics_with_json.star) - Builds a new metric from each element of a json array then returns all the created metrics. +- [custom error](/plugins/processors/starlark/testdata/fail.star) - Return a custom error with [fail](https://docs.bazel.build/versions/master/skylark/lib/globals.html#fail). +- [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star) - Compare the current metric with the previous one using the shared state. +- [rename prometheus remote write](/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) - Rename prometheus remote write measurement name with fieldname and rename fieldname to value. [All examples](/plugins/processors/starlark/testdata) are in the testdata folder. diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go index 4eda39b7d8d12..6876fe9636ab5 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/processors/starlark/builtins.go @@ -9,21 +9,18 @@ import ( "go.starlark.net/starlark" ) -func newMetric(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var name starlark.String if err := starlark.UnpackPositionalArgs("Metric", args, kwargs, 1, &name); err != nil { return nil, err } - m, err := metric.New(string(name), nil, nil, time.Now()) - if err != nil { - return nil, err - } + m := metric.New(string(name), nil, nil, time.Now()) return &Metric{metric: m}, nil } -func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func deepcopy(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var sm *Metric if err := starlark.UnpackPositionalArgs("deepcopy", args, kwargs, 1, &sm); err != nil { return nil, err @@ -34,6 +31,19 @@ func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, return &Metric{metric: dup}, nil } +// catch(f) evaluates f() and returns its evaluation error message +// if it failed or None if it succeeded. 
+func catch(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + var fn starlark.Callable + if err := starlark.UnpackArgs("catch", args, kwargs, "fn", &fn); err != nil { + return nil, err + } + if _, err := starlark.Call(thread, fn, nil, nil); err != nil { + return starlark.String(err.Error()), nil + } + return starlark.None, nil +} + type builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) { @@ -58,16 +68,10 @@ func builtinAttrNames(methods map[string]builtinMethod) []string { return names } -// nameErr returns an error message of the form "name: msg" -// where name is b.Name() and msg is a string or error. -func nameErr(b *starlark.Builtin, msg interface{}) error { - return fmt.Errorf("%s: %v", b.Name(), msg) -} - // --- dictionary methods --- // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear -func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictClear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -79,7 +83,7 @@ func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop -func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictPop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var k, d starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -99,7 +103,7 @@ func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem -func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictPopitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -111,7 +115,7 @@ func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tu } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get -func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictGet(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, dflt starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -127,7 +131,7 @@ func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault -func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictSetdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, dflt starlark.Value = nil, 
starlark.None if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -148,7 +152,7 @@ func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { // Unpack the arguments if len(args) > 1 { return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) @@ -178,7 +182,6 @@ func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tup iter2 := starlark.Iterate(pair) if iter2 == nil { return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) - } defer iter2.Done() len := starlark.Len(pair) @@ -221,7 +224,7 @@ func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tup } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items -func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictItems(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -234,7 +237,7 @@ func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys -func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictKeys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -248,7 +251,7 @@ func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_values(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictValues(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } diff --git a/plugins/processors/starlark/field_dict.go b/plugins/processors/starlark/field_dict.go index e0c0349b617a1..4a332b8268d9d 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/processors/starlark/field_dict.go @@ -3,6 +3,7 @@ package starlark import ( "errors" "fmt" + "reflect" "strings" "github.com/influxdata/telegraf" @@ -58,15 +59,15 @@ func (d FieldDict) Attr(name string) (starlark.Value, error) { } var FieldDictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, + "clear": dictClear, + "get": dictGet, + "items": dictItems, + "keys": dictKeys, + "pop": dictPop, + "popitem": dictPopitem, + "setdefault": dictSetdefault, + "update": dictUpdate, + "values": dictValues, } // Get implements the starlark.Mapping 
interface. @@ -174,6 +175,7 @@ func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err e sv, err := asStarlarkValue(value) return sv, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") @@ -210,17 +212,44 @@ func (i *FieldIterator) Done() { // AsStarlarkValue converts a field value to a starlark.Value. func asStarlarkValue(value interface{}) (starlark.Value, error) { - switch v := value.(type) { - case float64: - return starlark.Float(v), nil - case int64: - return starlark.MakeInt64(v), nil - case uint64: - return starlark.MakeUint64(v), nil - case string: - return starlark.String(v), nil - case bool: - return starlark.Bool(v), nil + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Slice: + length := v.Len() + array := make([]starlark.Value, length) + for i := 0; i < length; i++ { + sVal, err := asStarlarkValue(v.Index(i).Interface()) + if err != nil { + return starlark.None, err + } + array[i] = sVal + } + return starlark.NewList(array), nil + case reflect.Map: + dict := starlark.NewDict(v.Len()) + iter := v.MapRange() + for iter.Next() { + sKey, err := asStarlarkValue(iter.Key().Interface()) + if err != nil { + return starlark.None, err + } + sValue, err := asStarlarkValue(iter.Value().Interface()) + if err != nil { + return starlark.None, err + } + dict.SetKey(sKey, sValue) + } + return dict, nil + case reflect.Float32, reflect.Float64: + return starlark.Float(v.Float()), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return starlark.MakeInt64(v.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return starlark.MakeUint64(v.Uint()), nil + case reflect.String: + return starlark.String(v.String()), nil + case reflect.Bool: + return starlark.Bool(v.Bool()), nil } return starlark.None, errors.New("invalid type") diff --git a/plugins/processors/starlark/logging.go b/plugins/processors/starlark/logging.go new file mode 100644 index 0000000000000..35efa6a7effba --- /dev/null +++ b/plugins/processors/starlark/logging.go @@ -0,0 +1,47 @@ +package starlark + +import ( + "errors" + "fmt" + + "github.com/influxdata/telegraf" + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" +) + +// Builds a module that defines all the supported logging functions which will log using the provided logger +func LogModule(logger telegraf.Logger) *starlarkstruct.Module { + var logFunc = func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + return log(b, args, kwargs, logger) + } + return &starlarkstruct.Module{ + Name: "log", + Members: starlark.StringDict{ + "debug": starlark.NewBuiltin("log.debug", logFunc), + "info": starlark.NewBuiltin("log.info", logFunc), + "warn": starlark.NewBuiltin("log.warn", logFunc), + "error": starlark.NewBuiltin("log.error", logFunc), + }, + } +} + +// Logs the provided message according to the level chosen +func log(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple, logger telegraf.Logger) (starlark.Value, error) { + var msg starlark.String + if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &msg); err != nil { + return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) + } + switch b.Name() { + case "log.debug": + logger.Debug(string(msg)) + case "log.info": + logger.Info(string(msg)) + case "log.warn": + logger.Warn(string(msg)) + case "log.error": + logger.Error(string(msg)) + default: + 
return nil, errors.New("method " + b.Name() + " is unknown") + } + return starlark.None, nil +} diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index cf791b3f155e3..44f78fa6b6988 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -7,6 +7,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" + "go.starlark.net/lib/math" + "go.starlark.net/lib/time" "go.starlark.net/resolve" "go.starlark.net/starlark" "go.starlark.net/starlarkjson" @@ -27,19 +29,28 @@ def apply(metric): ## File containing a Starlark script. # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [processors.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true ` ) type Starlark struct { - Source string `toml:"source"` - Script string `toml:"script"` + Source string `toml:"source"` + Script string `toml:"script"` + Constants map[string]interface{} `toml:"constants"` Log telegraf.Logger `toml:"-"` - thread *starlark.Thread - applyFunc *starlark.Function - args starlark.Tuple - results []telegraf.Metric + thread *starlark.Thread + applyFunc *starlark.Function + args starlark.Tuple + results []telegraf.Metric + starlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error) } func (s *Starlark) Init() error { @@ -52,12 +63,16 @@ func (s *Starlark) Init() error { s.thread = &starlark.Thread{ Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, - Load: loadFunc, + Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { + return s.starlarkLoadFunc(module, s.Log) + }, } builtins := starlark.StringDict{} builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) + builtins["catch"] = starlark.NewBuiltin("catch", catch) + s.addConstants(&builtins) program, err := s.sourceProgram(builtins) if err != nil { @@ -70,6 +85,9 @@ func (s *Starlark) Init() error { return err } + // Make available a shared state to the apply function + globals["state"] = starlark.NewDict(0) + // Freeze the global state. This prevents modifications to the processor // state and prevents scripts from containing errors storing tracking // metrics. 
Tasks that require global state will not be possible due to @@ -121,7 +139,7 @@ func (s *Starlark) Description() string { return description } -func (s *Starlark) Start(acc telegraf.Accumulator) error { +func (s *Starlark) Start(_ telegraf.Accumulator) error { return nil } @@ -191,6 +209,17 @@ func (s *Starlark) Stop() error { return nil } +// Add all the constants defined in the plugin as constants of the script +func (s *Starlark) addConstants(builtins *starlark.StringDict) { + for key, val := range s.Constants { + sVal, err := asStarlarkValue(val) + if err != nil { + s.Log.Errorf("Unsupported type: %T", val) + } + (*builtins)[key] = sVal + } +} + func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool { for _, m := range metrics { if m == metric { @@ -212,16 +241,30 @@ func init() { func init() { processors.AddStreaming("starlark", func() telegraf.StreamingProcessor { - return &Starlark{} + return &Starlark{ + starlarkLoadFunc: loadFunc, + } }) } -func loadFunc(thread *starlark.Thread, module string) (starlark.StringDict, error) { +func loadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { switch module { case "json.star": return starlark.StringDict{ "json": starlarkjson.Module, }, nil + case "logging.star": + return starlark.StringDict{ + "log": LogModule(logger), + }, nil + case "math.star": + return starlark.StringDict{ + "math": math.Module, + }, nil + case "time.star": + return starlark.StringDict{ + "time": time.Module, + }, nil default: return nil, errors.New("module " + module + " is not available") } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index ce0b1803c959c..6ad169bbf3f87 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -1,8 +1,8 @@ package starlark import ( + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -10,9 +10,13 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + starlarktime "go.starlark.net/lib/time" + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" ) // Tests for runtime errors in the processors Init function. 
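Taken together, the changes above give script authors three new tools: the `catch` builtin for recovering from runtime errors, the loadable `logging.star`/`math.star`/`time.star` modules, and TOML-defined constants exposed as globals. A minimal sketch of a script combining `catch` with the logging module (the field names `payload` and `label` are illustrative assumptions, not part of this change):

```
load("json.star", "json")
load("logging.star", "log")

def apply(metric):
    # catch() invokes the callable and returns the error message as a
    # string if the call fails, or None if it succeeds
    err = catch(lambda: decode(metric))
    if err != None:
        log.warn("decoding failed: {}".format(err))
        metric.fields["error"] = err
    return metric

def decode(metric):
    # json.decode() fails on malformed input; catch() above turns that
    # failure into the returned message instead of dropping the metric
    metric.fields["label"] = json.decode(metric.fields["payload"])["label"]
```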
@@ -24,8 +28,9 @@ func TestInitError(t *testing.T) { { name: "source must define apply", plugin: &Starlark{ - Source: "", - Log: testutil.Logger{}, + Source: "", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -34,7 +39,8 @@ func TestInitError(t *testing.T) { Source: ` apply = 42 `, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -44,7 +50,8 @@ apply = 42 def apply(): pass `, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -53,13 +60,15 @@ def apply(): Source: ` for `, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { name: "no source no script", plugin: &Starlark{ - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -69,15 +78,17 @@ for def apply(): pass `, - Script: "testdata/ratio.star", - Log: testutil.Logger{}, + Script: "testdata/ratio.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { name: "script file not found", plugin: &Starlark{ - Script: "testdata/file_not_found.star", - Log: testutil.Logger{}, + Script: "testdata/file_not_found.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, } @@ -217,8 +228,9 @@ def apply(metric): for _, tt := range applyTests { t.Run(tt.name, func(t *testing.T) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, + Source: tt.source, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, } err := plugin.Init() require.NoError(t, err) @@ -250,6 +262,7 @@ func TestMetric(t *testing.T) { var tests = []struct { name string source string + constants map[string]interface{} input []telegraf.Metric expected []telegraf.Metric expectedErrorStr string @@ -692,6 +705,49 @@ def apply(metric): ), }, }, + { + name: "pop tag (default)", + source: ` +def apply(metric): + metric.tags['host2'] = metric.tags.pop('url', 'foo.org') + return metric +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "url": "bar.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "host2": "foo.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "host2": "bar.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + }, + }, { name: "popitem tags", source: ` @@ -1760,6 +1816,53 @@ def apply(metric): ), }, }, + { + name: "pop field (default)", + source: ` +def apply(metric): + metric.fields['idle_count'] = metric.fields.pop('count', 10) + return metric +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "count": 0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "idle_count": 10, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, 
+ "time_guest": 0, + "idle_count": 0, + }, + time.Unix(0, 0), + ), + }, + }, { name: "popitem field", source: ` @@ -2378,13 +2481,105 @@ def apply(metric): ), }, }, + { + name: "support errors", + source: ` +load("json.star", "json") + +def apply(metric): + msg = catch(lambda: process(metric)) + if msg != None: + metric.fields["error"] = msg + metric.fields["value"] = "default" + return metric + +def process(metric): + metric.fields["field1"] = "value1" + metric.tags["tags1"] = "value2" + # Throw an error + json.decode(metric.fields.get('value')) + # Should never be called + metric.fields["msg"] = "value4" +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": "non-json-content", "msg": "value3"}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{"tags1": "value2"}, + map[string]interface{}{ + "value": "default", + "field1": "value1", + "msg": "value3", + "error": "json.decode: at offset 0, unexpected character 'n'", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "support constants", + source: ` +def apply(metric): + metric.fields["p1"] = max_size + metric.fields["p2"] = threshold + metric.fields["p3"] = default_name + metric.fields["p4"] = debug_mode + metric.fields["p5"] = supported_values[0] + metric.fields["p6"] = supported_values[1] + metric.fields["p7"] = supported_entries[2] + metric.fields["p8"] = supported_entries["3"] + return metric + `, + constants: map[string]interface{}{ + "max_size": 10, + "threshold": 0.75, + "default_name": "Julia", + "debug_mode": true, + "supported_values": []interface{}{2, "3"}, + "supported_entries": map[interface{}]interface{}{ + 2: "two", + "3": "three", + }, + "unsupported_type": time.Now(), + }, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "p1": 10, + "p2": 0.75, + "p3": "Julia", + "p4": true, + "p5": 2, + "p6": "3", + "p7": "two", + "p8": "three", + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, + Source: tt.source, + Log: testutil.Logger{}, + Constants: tt.constants, + starlarkLoadFunc: testLoadFunc, } err := plugin.Init() require.NoError(t, err) @@ -2411,6 +2606,108 @@ def apply(metric): } } +// Tests the behavior of the plugin according the provided TOML configuration. 
+func TestConfig(t *testing.T) { + var tests = []struct { + name string + config string + input []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "support constants from configuration", + config: ` +[[processors.starlark]] + source = ''' +def apply(metric): + metric.fields["p1"] = max_size + metric.fields["p2"] = threshold + metric.fields["p3"] = default_name + metric.fields["p4"] = debug_mode + metric.fields["p5"] = supported_values[0] + metric.fields["p6"] = supported_values[1] + metric.fields["p7"] = supported_entries["2"] + metric.fields["p8"] = supported_entries["3"] + return metric +''' + [processors.starlark.constants] + max_size = 10 + threshold = 0.75 + default_name = "Elsa" + debug_mode = true + supported_values = ["2", "3"] + supported_entries = { "2" = "two", "3" = "three" } + unsupported_type = 2009-06-12 + `, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "p1": 10, + "p2": 0.75, + "p3": "Elsa", + "p4": true, + "p5": "2", + "p6": "3", + "p7": "two", + "p8": "three", + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin, err := buildPlugin(tt.config) + require.NoError(t, err) + err = plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + + err = plugin.Start(&acc) + require.NoError(t, err) + + for _, m := range tt.input { + err = plugin.Add(m, &acc) + require.NoError(t, err) + } + + err = plugin.Stop() + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} + +// Build a Starlark plugin from the provided configuration. 
+func buildPlugin(configContent string) (*Starlark, error) { + c := config.NewConfig() + err := c.LoadConfigData([]byte(configContent)) + if err != nil { + return nil, err + } + if len(c.Processors) != 1 { + return nil, errors.New("Only one processor was expected") + } + plugin, ok := (c.Processors[0].Processor).(*Starlark) + if !ok { + return nil, errors.New("Only a Starlark processor was expected") + } + plugin.Log = testutil.Logger{} + return plugin, nil +} + func TestScript(t *testing.T) { var tests = []struct { name string @@ -2422,8 +2719,9 @@ func TestScript(t *testing.T) { { name: "rename", plugin: &Starlark{ - Script: "testdata/rename.star", - Log: testutil.Logger{}, + Script: "testdata/rename.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("cpu", @@ -2446,11 +2744,79 @@ func TestScript(t *testing.T) { ), }, }, + { + name: "drop fields by type", + plugin: &Starlark{ + Script: "testdata/drop_string_fields.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, + }, + input: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "b": "42", + "c": 42.0, + "d": "42.0", + "e": true, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "c": 42.0, + "e": true, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "drop fields with unexpected type", + plugin: &Starlark{ + Script: "testdata/drop_fields_with_unexpected_type.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, + }, + input: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "b": "42", + "c": 42.0, + "d": "42.0", + "e": true, + "f": 23.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "c": 42.0, + "d": "42.0", + "e": true, + "f": 23.0, + }, + time.Unix(0, 0), + ), + }, + }, { name: "scale", plugin: &Starlark{ - Script: "testdata/scale.star", - Log: testutil.Logger{}, + Script: "testdata/scale.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("cpu", @@ -2470,8 +2836,9 @@ func TestScript(t *testing.T) { { name: "ratio", plugin: &Starlark{ - Script: "testdata/ratio.star", - Log: testutil.Logger{}, + Script: "testdata/ratio.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("mem", @@ -2495,6 +2862,117 @@ func TestScript(t *testing.T) { ), }, }, + { + name: "logging", + plugin: &Starlark{ + Script: "testdata/logging.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, + }, + input: []telegraf.Metric{ + testutil.MustMetric("log", + map[string]string{}, + map[string]interface{}{ + "debug": "a debug message", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("log", + map[string]string{}, + map[string]interface{}{ + "debug": "a debug message", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "multiple_metrics", + plugin: &Starlark{ + Script: "testdata/multiple_metrics.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, + }, + input: []telegraf.Metric{ + testutil.MustMetric("mm", + map[string]string{}, + map[string]interface{}{ + "value": "a", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + 
testutil.MustMetric("mm2", + map[string]string{}, + map[string]interface{}{ + "value": "b", + }, + time.Unix(0, 0), + ), + testutil.MustMetric("mm1", + map[string]string{}, + map[string]interface{}{ + "value": "a", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "multiple_metrics_with_json", + plugin: &Starlark{ + Script: "testdata/multiple_metrics_with_json.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, + }, + input: []telegraf.Metric{ + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{ + "value": "[{\"label\": \"hello\"}, {\"label\": \"world\"}]", + }, + time.Unix(1618488000, 999), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{ + "value": "hello", + }, + time.Unix(1618488000, 999), + ), + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{ + "value": "world", + }, + time.Unix(1618488000, 999), + ), + }, + }, + { + name: "fail", + plugin: &Starlark{ + Script: "testdata/fail.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, + }, + input: []telegraf.Metric{ + testutil.MustMetric("fail", + map[string]string{}, + map[string]interface{}{ + "value": 1, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{}, + expectedErrorStr: "fail: The field value should be greater than 1", + }, } for _, tt := range tests { @@ -2769,8 +3247,9 @@ def apply(metric): for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, + Source: tt.source, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, } err := plugin.Init() @@ -2804,14 +3283,19 @@ func TestAllScriptTestData(t *testing.T) { } fn := path t.Run(fn, func(t *testing.T) { - b, err := ioutil.ReadFile(fn) + b, err := os.ReadFile(fn) require.NoError(t, err) lines := strings.Split(string(b), "\n") inputMetrics := parseMetricsFrom(t, lines, "Example Input:") - outputMetrics := parseMetricsFrom(t, lines, "Example Output:") + expectedErrorStr := parseErrorMessage(t, lines, "Example Output Error:") + outputMetrics := []telegraf.Metric{} + if expectedErrorStr == "" { + outputMetrics = parseMetricsFrom(t, lines, "Example Output:") + } plugin := &Starlark{ - Script: fn, - Log: testutil.Logger{}, + Script: fn, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, } require.NoError(t, plugin.Init()) @@ -2822,13 +3306,17 @@ func TestAllScriptTestData(t *testing.T) { for _, m := range inputMetrics { err = plugin.Add(m, acc) - require.NoError(t, err) + if expectedErrorStr != "" { + require.EqualError(t, err, expectedErrorStr) + } else { + require.NoError(t, err) + } } err = plugin.Stop() require.NoError(t, err) - testutil.RequireMetricsEqual(t, outputMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, outputMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics()) }) return nil }) @@ -2863,3 +3351,39 @@ func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []te } return metrics } + +// parses error message out of line protocol following a header +func parseErrorMessage(t *testing.T, lines []string, header string) string { + require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none") + startIdx := -1 + for i := range lines { + if strings.TrimLeft(lines[i], "# ") == header { + startIdx = i + 1 + break + } + } + if startIdx == -1 { + return "" + } + require.True(t, startIdx < len(lines), 
fmt.Sprintf("Expected to find the error message after %q, but found none", header)) + return strings.TrimLeft(lines[startIdx], "# ") +} + +func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { + result, err := loadFunc(module, logger) + if err != nil { + return nil, err + } + + if module == "time.star" { + customModule := result["time"].(*starlarkstruct.Module) + customModule.Members["now"] = starlark.NewBuiltin("now", testNow) + result["time"] = customModule + } + + return result, nil +} + +func testNow(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + return starlarktime.Time(time.Date(2021, 4, 15, 12, 0, 0, 999, time.UTC)), nil +} diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/processors/starlark/tag_dict.go index 3d95264382db5..7dbb8c12d0ed6 100644 --- a/plugins/processors/starlark/tag_dict.go +++ b/plugins/processors/starlark/tag_dict.go @@ -58,15 +58,15 @@ func (d TagDict) Attr(name string) (starlark.Value, error) { } var TagDictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, + "clear": dictClear, + "get": dictGet, + "items": dictItems, + "keys": dictKeys, + "pop": dictPop, + "popitem": dictPopitem, + "setdefault": dictSetdefault, + "update": dictUpdate, + "values": dictValues, } // Get implements the starlark.Mapping interface. @@ -162,6 +162,7 @@ func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err err v := starlark.String(value) return v, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") diff --git a/plugins/processors/starlark/testdata/compare_metrics.star b/plugins/processors/starlark/testdata/compare_metrics.star new file mode 100644 index 0000000000000..5e855df443be8 --- /dev/null +++ b/plugins/processors/starlark/testdata/compare_metrics.star @@ -0,0 +1,26 @@ +# Example showing how to keep the last metric in order to compare it with the new one. +# +# Example Input: +# cpu value=10i 1465839830100400201 +# cpu value=8i 1465839830100400301 +# +# Example Output: +# cpu_diff value=2i 1465839830100400301 + +state = { + "last": None +} + +def apply(metric): + # Load from the shared state the metric assigned to the key "last" + last = state["last"] + # Store the deepcopy of the new metric into the shared state and assign it to the key "last" + # NB: To store a metric into the shared state you have to deep copy it + state["last"] = deepcopy(metric) + if last != None: + # Create a new metric named "cpu_diff" + result = Metric("cpu_diff") + # Set the field "value" to the difference between the value of the last metric and the current one + result.fields["value"] = last.fields["value"] - metric.fields["value"] + result.time = metric.time + return result diff --git a/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star b/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star new file mode 100644 index 0000000000000..2b122e19e258a --- /dev/null +++ b/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star @@ -0,0 +1,30 @@ +# Drop fields if they NOT contain values of an expected type. +# +# In this example we ignore fields with an unknown expected type and do not drop them. 
+# +# Example Input: +# measurement,host=hostname a=1i,b=4.2,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +# measurement,host=hostname a=1i,b="somestring",c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1i,b=4.2,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +# measurement,host=hostname a=1i,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 + +load("logging.star", "log") +# loads log.debug(), log.info(), log.warn(), log.error() + +expected_type = { + "a": "int", + "b": "float", + "c": "float", + "d": "string", + "e": "bool" +} + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) != expected_type.get(k, type(v)): + metric.fields.pop(k) + log.warn("Unexpected field type dropped: metric {} had field {} with type {}, but it is expected to be {}".format(metric.name, k, type(v), expected_type.get(k, type(v)))) + + return metric diff --git a/plugins/processors/starlark/testdata/drop_string_fields.star b/plugins/processors/starlark/testdata/drop_string_fields.star new file mode 100644 index 0000000000000..d5c44e497c77c --- /dev/null +++ b/plugins/processors/starlark/testdata/drop_string_fields.star @@ -0,0 +1,14 @@ +# Drop fields if they contain a string. +# +# Example Input: +# measurement,host=hostname a=1,b="somestring" 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1 1597255410000000000 + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) == "string": + metric.fields.pop(k) + + return metric diff --git a/plugins/processors/starlark/testdata/fail.star b/plugins/processors/starlark/testdata/fail.star new file mode 100644 index 0000000000000..484217aad9dba --- /dev/null +++ b/plugins/processors/starlark/testdata/fail.star @@ -0,0 +1,13 @@ +# Example of the way to return a custom error thanks to the built-in function fail +# Returning an error will drop the current metric. Consider using logging instead if you want to keep the metric. +# +# Example Input: +# fail value=1 1465839830100400201 +# +# Example Output Error: +# fail: The field value should be greater than 1 + +def apply(metric): + if metric.fields["value"] <= 1: + return fail("The field value should be greater than 1") + return metric diff --git a/plugins/processors/starlark/testdata/iops.star b/plugins/processors/starlark/testdata/iops.star new file mode 100644 index 0000000000000..fad572f27b77d --- /dev/null +++ b/plugins/processors/starlark/testdata/iops.star @@ -0,0 +1,55 @@ +# Example showing how to obtain IOPS (to aggregate, to produce max_iops). 
Input can be produced by: + +#[[inputs.diskio]] +# alias = "diskio1s" +# interval = "1s" +# fieldpass = ["reads", "writes"] +# name_suffix = "1s" +# +# Example Input: +# diskio1s,host=hostname,name=diska reads=0i,writes=0i 1554079521000000000 +# diskio1s,host=hostname,name=diska reads=0i,writes=0i 1554079522000000000 +# diskio1s,host=hostname,name=diska reads=110i,writes=0i 1554079523000000000 +# diskio1s,host=hostname,name=diska reads=110i,writes=30i 1554079524000000000 +# diskio1s,host=hostname,name=diska reads=160i,writes=70i 1554079525000000000 +# +# Example Output: +# diskiops,host=hostname,name=diska readsps=0,writesps=0,iops=0 1554079522000000000 +# diskiops,host=hostname,name=diska readsps=110,writesps=0,iops=110 1554079523000000000 +# diskiops,host=hostname,name=diska readsps=0,writesps=30,iops=30 1554079524000000000 +# diskiops,host=hostname,name=diska readsps=50,writesps=40,iops=90 1554079525000000000 + +state = { } + +def apply(metric): + disk_name = metric.tags["name"] + # Load from the shared state the metric stored for this disk name + last = state.get(disk_name) + # Store the deepcopy of the new metric into the shared state under the disk name + # NB: To store a metric into the shared state you have to deep copy it + state[disk_name] = deepcopy(metric) + if last != None: + # Create the new metric + diskiops = Metric("diskiops") + # Calculate reads/writes per second + reads = metric.fields["reads"] - last.fields["reads"] + writes = metric.fields["writes"] - last.fields["writes"] + io = reads + writes + interval_seconds = ( metric.time - last.time ) / 1000000000 + diskiops.fields["readsps"] = ( reads / interval_seconds ) + diskiops.fields["writesps"] = ( writes / interval_seconds ) + diskiops.fields["iops"] = ( io / interval_seconds ) + diskiops.tags["name"] = disk_name + diskiops.tags["host"] = metric.tags["host"] + diskiops.time = metric.time + return diskiops + +# This could be aggregated to obtain max IOPS using: + +# [[aggregators.basicstats]] +# namepass = ["diskiops"] +# period = "60s" +# drop_original = true +# stats = ["max"] +# +# diskiops,host=hostname,name=diska readsps_max=110,writesps_max=40,iops_max=110 1554079525000000000 diff --git a/plugins/processors/starlark/testdata/json_nested.star b/plugins/processors/starlark/testdata/json_nested.star new file mode 100644 index 0000000000000..cc391d6a5f91b --- /dev/null +++ b/plugins/processors/starlark/testdata/json_nested.star @@ -0,0 +1,46 @@ +# +# This code assumes the value parser with data_type='string' is used +# in the input collecting the JSON data. The entire JSON obj/doc will +# be set to a Field named `value` with which this code will work. 
+ +# JSON: +# ``` +# { +# "fields": { +# "LogEndOffset": 339238, +# "LogStartOffset": 339238, +# "NumLogSegments": 1, +# "Size": 0, +# "UnderReplicatedPartitions": 0 +# }, +# "name": "partition", +# "tags": { +# "host": "CUD1-001559", +# "jolokia_agent_url": "http://localhost:7777/jolokia", +# "partition": "1", +# "topic": "qa-kafka-connect-logs" +# }, +# "timestamp": 1591124461 +# } ``` +# +# Example Input: +# json value="[{\"fields\": {\"LogEndOffset\": 339238, \"LogStartOffset\": 339238, \"NumLogSegments\": 1, \"Size\": 0, \"UnderReplicatedPartitions\": 0}, \"name\": \"partition\", \"tags\": {\"host\": \"CUD1-001559\", \"jolokia_agent_url\": \"http://localhost:7777/jolokia\", \"partition\": \"1\", \"topic\": \"qa-kafka-connect-logs\"}, \"timestamp\": 1591124461}]" + +# Example Output: +# partition,host=CUD1-001559,jolokia_agent_url=http://localhost:7777/jolokia,partition=1,topic=qa-kafka-connect-logs LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1591124461000000000 + + +load("json.star", "json") + +def apply(metric): + j_list = json.decode(metric.fields.get('value')) # input JSON may be an array of objects + metrics = [] + for obj in j_list: + new_metric = Metric("partition") # We want a new InfluxDB/Telegraf metric each iteration + for tag in obj["tags"].items(): # 4 Tags to iterate through + new_metric.tags[str(tag[0])] = tag[1] + for field in obj["fields"].items(): # 5 Fields to iterate through + new_metric.fields[str(field[0])] = field[1] + new_metric.time = int(obj["timestamp"] * 1e9) + metrics.append(new_metric) + return metrics diff --git a/plugins/processors/starlark/testdata/logging.star b/plugins/processors/starlark/testdata/logging.star new file mode 100644 index 0000000000000..8be85eb968cf1 --- /dev/null +++ b/plugins/processors/starlark/testdata/logging.star @@ -0,0 +1,19 @@ +# Example showing how to log a message at each of the supported levels +# using the Telegraf logger. +# +# Example Input: +# log debug="a debug message" 1465839830100400201 +# +# Example Output: +# log debug="a debug message" 1465839830100400201 + +load("logging.star", "log") +# loads log.debug(), log.info(), log.warn(), log.error() + +def apply(metric): + log.debug("debug: {}".format(metric.fields["debug"])) + log.info("an info message") + log.warn("a warning message") + log.error("an error message") + return metric + \ No newline at end of file diff --git a/plugins/processors/starlark/testdata/math.star b/plugins/processors/starlark/testdata/math.star new file mode 100644 index 0000000000000..f63669acebf82 --- /dev/null +++ b/plugins/processors/starlark/testdata/math.star @@ -0,0 +1,14 @@ +# Example showing how the math module can be used to compute the value of a field. +# +# Example Input: +# math value=10000i 1465839830100400201 +# +# Example Output: +# math result=4 1465839830100400201 + +load('math.star', 'math') +# loads all the functions and constants defined in the math module + +def apply(metric): + metric.fields["result"] = math.log(metric.fields.pop('value'), 10) + return metric diff --git a/plugins/processors/starlark/testdata/multiple_metrics.star b/plugins/processors/starlark/testdata/multiple_metrics.star new file mode 100644 index 0000000000000..6abf567f66c97 --- /dev/null +++ b/plugins/processors/starlark/testdata/multiple_metrics.star @@ -0,0 +1,26 @@ +# Example showing how to create several metrics using the Starlark processor. 
+# +# Example Input: +# mm value="a" 1465839830100400201 +# +# Example Output: +# mm2 value="b" 1465839830100400201 +# mm1 value="a" 1465839830100400201 + +def apply(metric): + # Initialize a list of metrics + metrics = [] + # Create a new metric whose name is "mm2" + metric2 = Metric("mm2") + # Set the field "value" to b + metric2.fields["value"] = "b" + # Reset the time (only needed for testing purpose) + metric2.time = metric.time + # Add metric2 to the list of metrics + metrics.append(metric2) + # Rename the original metric to "mm1" + metric.name = "mm1" + # Add metric to the list of metrics + metrics.append(metric) + # Return the created list of metrics + return metrics diff --git a/plugins/processors/starlark/testdata/multiple_metrics_with_json.star b/plugins/processors/starlark/testdata/multiple_metrics_with_json.star new file mode 100644 index 0000000000000..fa4dfcc483e1b --- /dev/null +++ b/plugins/processors/starlark/testdata/multiple_metrics_with_json.star @@ -0,0 +1,27 @@ +# Example showing how to create several metrics from a json array. +# +# Example Input: +# json value="[{\"label\": \"hello\"}, {\"label\": \"world\"}]" +# +# Example Output: +# json value="hello" 1618488000000000999 +# json value="world" 1618488000000000999 + +# loads json.encode(), json.decode(), json.indent() +load("json.star", "json") +load("time.star", "time") + +def apply(metric): + # Initialize a list of metrics + metrics = [] + # Loop over the json array stored into the field + for obj in json.decode(metric.fields['value']): + # Create a new metric whose name is "json" + current_metric = Metric("json") + # Set the field "value" to the label extracted from the current json object + current_metric.fields["value"] = obj["label"] + # Reset the time (only needed for testing purpose) + current_metric.time = time.now().unix_nano + # Add metric to the list of metrics + metrics.append(current_metric) + return metrics diff --git a/plugins/processors/starlark/testdata/pivot.star b/plugins/processors/starlark/testdata/pivot.star index f32ebf45d9763..c57d13d5fa420 100644 --- a/plugins/processors/starlark/testdata/pivot.star +++ b/plugins/processors/starlark/testdata/pivot.star @@ -4,10 +4,10 @@ In this example it pivots the value of key `sensor` to be the key of the value in key `value` Example Input: -temperature sensor="001A0",value=111.48 +temperature sensor="001A0",value=111.48 1618488000000000999 Example Output: -temperature 001A0=111.48 +temperature 001A0=111.48 1618488000000000999 ''' def apply(metric): diff --git a/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star b/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star new file mode 100644 index 0000000000000..87c4e764bf678 --- /dev/null +++ b/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star @@ -0,0 +1,16 @@ +# Specifically for prometheus remote write - renames the measurement name to the fieldname. Renames the fieldname to value. +# Assumes there is only one field as is the case for prometheus remote write. 
+# +# Example Input: +# prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1618488000000000999 +# +# Example Output: +# go_gc_duration_seconds,instance=localhost:9090,job=prometheus,quantile=0.99 value=4.63 1618488000000000999 + +def apply(metric): + if metric.name == "prometheus_remote_write": + for k, v in metric.fields.items(): + metric.name = k + metric.fields["value"] = v + metric.fields.pop(k) + return metric \ No newline at end of file diff --git a/plugins/processors/starlark/testdata/schema_sizing.star b/plugins/processors/starlark/testdata/schema_sizing.star new file mode 100644 index 0000000000000..c716a153c7a23 --- /dev/null +++ b/plugins/processors/starlark/testdata/schema_sizing.star @@ -0,0 +1,96 @@ +# Produces a new Line of statistics about the Fields +# Drops the original metric +# +# Example Input: +# logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd asn=1313i,cache_response_code=202i,colo_code="LAX",colo_id=12i,compute_time=28736i,edge_end_timestamp=1611085500320i,edge_start_timestamp=1611085496208i,id="1b5c67ed-dfd0-4d30-99bd-84f0a9c5297b_76af1809-29d1-4b35-a0cf-39797458275c",parent_ray_id="00",processing_details="ok",rate_limit_id=0i,ray_id="76af1809-29d1-4b35-a0cf-39797458275c",request_bytes=7777i,request_host="engd-08364a825824e04f0a494115.reactorstream.dev",request_id="1b5c67ed-dfd0-4d30-99bd-84f0a9c5297b",request_result="succeeded",request_uri="/ENafcb2798a9be4bb7bfddbf35c374db15",response_code=200i,subrequest=false,subrequest_count=1i,user_agent="curl/7.64.1" 1611085496208 +# +# Example Output: +# sizing,measurement=logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd tag_count=4,tag_key_avg_length=11.25,tag_value_avg_length=5.25,int_key_avg_length=13.4,int_avg_length=4.9,int_count=10,bool_key_avg_length=10,bool_avg_length=5,bool_count=1,str_key_avg_length=10.5,str_avg_length=25.4,str_count=10 1611085496208 + +def apply(metric): + new_metric = Metric("sizing") + num_tags = len(metric.tags.items()) + new_metric.fields["tag_count"] = float(num_tags) + new_metric.fields["tag_key_avg_length"] = sum(map(len, metric.tags.keys())) / num_tags + new_metric.fields["tag_value_avg_length"] = sum(map(len, metric.tags.values())) / num_tags + + new_metric.tags["measurement"] = metric.name + + new_metric.tags.update(metric.tags) + + ints, floats, bools, strs = [], [], [], [] + for field in metric.fields.items(): + key, value = field[0], field[1] + if type(value) == "int": + ints.append(field) + elif type(value) == "float": + floats.append(field) + elif type(value) == "bool": + bools.append(field) + elif type(value) == "string": + strs.append(field) + + if len(ints) > 0: + int_keys = [i[0] for i in ints] + int_vals = [i[1] for i in ints] + produce_pairs(new_metric, int_keys, "int", key=True) + produce_pairs(new_metric, int_vals, "int") + if len(floats) > 0: + float_keys = [i[0] for i in floats] + float_vals = [i[1] for i in floats] + produce_pairs(new_metric, float_keys, "float", key=True) + produce_pairs(new_metric, float_vals, "float") + if len(bools) > 0: + bool_keys = [i[0] for i in bools] + bool_vals = [i[1] for i in bools] + produce_pairs(new_metric, bool_keys, "bool", key=True) + produce_pairs(new_metric, bool_vals, "bool") + if len(strs) > 0: + str_keys = [i[0] for i in strs] + str_vals = [i[1] for i in strs] + produce_pairs(new_metric, str_keys, "str", key=True) + produce_pairs(new_metric, str_vals, "str") + + new_metric.time = metric.time + return 
new_metric + +def produce_pairs(metric, li, field_type, key=False): + lens = elem_lengths(li) + counts = count_lengths(lens) + metric.fields["{}_count".format(field_type)] = float(len(li)) + if key: + metric.fields["{}_key_avg_length".format(field_type)] = float(mean(lens)) + else: + metric.fields["{}_avg_length".format(field_type)] = float(mean(lens)) + + +def elem_lengths(li): + if type(li[0]) in ("int", "float", "bool"): + return [len(str(elem)) for elem in li] + else: + return [len(elem) for elem in li] + +def count_lengths(li): + # Returns dict of counts of each occurrence of length in a list of lengths + lens = [] + counts = [] + for elem in li: + if elem not in lens: + lens.append(elem) + counts.append(1) + else: + index = lens.index(elem) + counts[index] += 1 + return dict(zip(lens, counts)) + +def map(f, li): + return [f(x) for x in li] + +def sum(li): + sum = 0 + for i in li: + sum += i + return sum + +def mean(li): + return sum(li)/len(li) diff --git a/plugins/processors/starlark/testdata/time_date.star b/plugins/processors/starlark/testdata/time_date.star new file mode 100644 index 0000000000000..7be7f8fa7fcf2 --- /dev/null +++ b/plugins/processors/starlark/testdata/time_date.star @@ -0,0 +1,19 @@ +# Example of parsing a date out of a field and modifying the metric to inject the year, month and day. +# +# Example Input: +# time value="2009-06-12T12:06:10.000000099" 1465839830100400201 +# +# Example Output: +# time year=2009i,month=6i,day=12i 1465839830100400201 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + date = time.parse_time(metric.fields.get('value'), format="2006-01-02T15:04:05.999999999", location="UTC") + metric.fields.pop('value') + metric.fields["year"] = date.year + metric.fields["month"] = date.month + metric.fields["day"] = date.day + return metric diff --git a/plugins/processors/starlark/testdata/time_duration.star b/plugins/processors/starlark/testdata/time_duration.star new file mode 100644 index 0000000000000..773e20744cce6 --- /dev/null +++ b/plugins/processors/starlark/testdata/time_duration.star @@ -0,0 +1,17 @@ +# Example of parsing a duration out of a field and modifying the metric to inject the equivalent in seconds. +# +# Example Input: +# time value="3m35s" 1465839830100400201 +# +# Example Output: +# time seconds=215 1465839830100400201 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + duration = time.parse_duration(metric.fields.get('value')) + metric.fields.pop('value') + metric.fields["seconds"] = duration.seconds + return metric diff --git a/plugins/processors/starlark/testdata/time_set_timestamp.star b/plugins/processors/starlark/testdata/time_set_timestamp.star new file mode 100644 index 0000000000000..bc64457dce880 --- /dev/null +++ b/plugins/processors/starlark/testdata/time_set_timestamp.star @@ -0,0 +1,15 @@ +# Example of setting the metric timestamp to the current time. +# +# Example Input: +# time result="OK" 1515581000000000000 +# +# Example Output: +# time result="OK" 1618488000000000999 + +load('time.star', 'time') + +def apply(metric): + # You can set the timestamp by using the current time. 
+ metric.time = time.now().unix_nano + + return metric \ No newline at end of file diff --git a/plugins/processors/starlark/testdata/time_timestamp.star b/plugins/processors/starlark/testdata/time_timestamp.star new file mode 100644 index 0000000000000..73e885b26c3dc --- /dev/null +++ b/plugins/processors/starlark/testdata/time_timestamp.star @@ -0,0 +1,22 @@ +# Example of filtering metrics based on the timestamp in seconds. +# +# Example Input: +# time result="KO" 1616020365100400201 +# time result="OK" 1616150517100400201 +# +# Example Output: +# time result="OK" 1616150517100400201 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + # 1616198400 sec = Saturday, March 20, 2021 0:00:00 GMT + refDate = time.from_timestamp(1616198400) + # 1616020365 sec = Wednesday, March 17, 2021 22:32:45 GMT + # 1616150517 sec = Friday, March 19, 2021 10:41:57 GMT + metric_date = time.from_timestamp(int(metric.time / 1e9)) + # Only keep metrics with a timestamp that is not more than 24 hours before the reference date + if refDate - time.parse_duration("24h") < metric_date: + return metric diff --git a/plugins/processors/starlark/testdata/time_timestamp_nanos.star b/plugins/processors/starlark/testdata/time_timestamp_nanos.star new file mode 100644 index 0000000000000..d305cb1f22e9f --- /dev/null +++ b/plugins/processors/starlark/testdata/time_timestamp_nanos.star @@ -0,0 +1,22 @@ +# Example of filtering metrics based on the timestamp with nanoseconds. +# +# Example Input: +# time result="KO" 1617900602123455999 +# time result="OK" 1617900602123456789 +# +# Example Output: +# time result="OK" 1617900602123456789 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + # 1617900602123457000 nanosec = Thursday, April 8, 2021 16:50:02.123457000 GMT + refDate = time.from_timestamp(1617900602, 123457000) + # 1617900602123455999 nanosec = Thursday, April 8, 2021 16:50:02.123455999 GMT + # 1617900602123456789 nanosec = Thursday, April 8, 2021 16:50:02.123456789 GMT + metric_date = time.from_timestamp(int(metric.time / 1e9), int(metric.time % 1e9)) + # Only keep metrics with a timestamp that is not more than 1 microsecond before the reference date + if refDate - time.parse_duration("1us") < metric_date: + return metric diff --git a/plugins/processors/starlark/testdata/value_filter.star b/plugins/processors/starlark/testdata/value_filter.star index eeb2432f6679f..a4ceb28a68a72 100644 --- a/plugins/processors/starlark/testdata/value_filter.star +++ b/plugins/processors/starlark/testdata/value_filter.star @@ -4,11 +4,11 @@ In this example we look at the `value` field of the metric. If the value is zeor, we delete all the fields, effectively dropping the metric. 
Example Input: -temperature sensor="001A0",value=111.48 -temperature sensor="001B0",value=0.0 +temperature sensor="001A0",value=111.48 1618488000000000999 +temperature sensor="001B0",value=0.0 1618488000000000999 Example Output: -temperature sensor="001A0",value=111.48 +temperature sensor="001A0",value=111.48 1618488000000000999 ''' def apply(metric): diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index a7aa0e2a585bd..e0fcec9103151 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -14,6 +14,7 @@ Implemented functions are: - replace - left - base64decode +- valid_utf8 Please note that in this implementation these are processed in the order that they appear above. @@ -78,6 +79,12 @@ If you'd like to apply multiple processings to the same `tag_key` or `field_key` ## Decode a base64 encoded utf-8 string # [[processors.strings.base64decode]] # field = "message" + + ## Sanitize a string to ensure it is a valid utf-8 string + ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty + # [[processors.strings.valid_utf8]] + # field = "message" + # replacement = "" ``` #### Trim, TrimLeft, TrimRight diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 1ac6c61019c6f..7b2d3251ea381 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -22,6 +22,7 @@ type Strings struct { Replace []converter `toml:"replace"` Left []converter `toml:"left"` Base64Decode []converter `toml:"base64decode"` + ValidUTF8 []converter `toml:"valid_utf8"` converters []converter init bool @@ -42,6 +43,7 @@ type converter struct { Old string New string Width int + Replacement string fn ConvertFunc } @@ -98,6 +100,12 @@ const sampleConfig = ` ## Decode a base64 encoded utf-8 string # [[processors.strings.base64decode]] # field = "message" + + ## Sanitize a string to ensure it is a valid utf-8 string + ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty + # [[processors.strings.valid_utf8]] + # field = "message" + # replacement = "" ` func (s *Strings) SampleConfig() string { @@ -287,9 +295,9 @@ func (s *Strings) initOnce() { newString := strings.Replace(s, c.Old, c.New, -1) if newString == "" { return s - } else { - return newString } + + return newString } s.converters = append(s.converters, c) } @@ -298,9 +306,9 @@ func (s *Strings) initOnce() { c.fn = func(s string) string { if len(s) < c.Width { return s - } else { - return s[:c.Width] } + + return s[:c.Width] } s.converters = append(s.converters, c) } @@ -318,6 +326,11 @@ func (s *Strings) initOnce() { } s.converters = append(s.converters, c) } + for _, c := range s.ValidUTF8 { + c := c + c.fn = func(s string) string { return strings.ToValidUTF8(s, c.Replacement) } + s.converters = append(s.converters, c) + } s.init = true } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index 2c1be510ef9b6..c4201188436e6 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -12,7 +12,7 @@ import ( ) func newM1() telegraf.Metric { - m1, _ := metric.New("IIS_log", + m1 := metric.New("IIS_log", map[string]string{ "verb": "GET", "s-computername": "MIXEDCASE_hostname", @@ -27,7 +27,7 @@ func newM1() telegraf.Metric { } func newM2() telegraf.Metric { - m1, _ := metric.New("IIS_log", + m1 := metric.New("IIS_log", 
map[string]string{ "verb": "GET", "S-ComputerName": "MIXEDCASE_hostname", @@ -795,7 +795,7 @@ func TestMultipleConversions(t *testing.T) { }, } - m, _ := metric.New("IIS_log", + m := metric.New("IIS_log", map[string]string{ "verb": "GET", "resp_code": "200", @@ -856,7 +856,7 @@ func TestReadmeExample(t *testing.T) { }, } - m, _ := metric.New("iis_log", + m := metric.New("iis_log", map[string]string{ "verb": "get", "uri_stem": "/API/HealthCheck", @@ -895,7 +895,7 @@ func TestReadmeExample(t *testing.T) { func newMetric(name string) telegraf.Metric { tags := map[string]string{} fields := map[string]interface{}{} - m, _ := metric.New(name, tags, fields, time.Now()) + m := metric.New(name, tags, fields, time.Now()) return m } @@ -1047,3 +1047,113 @@ func TestBase64Decode(t *testing.T) { }) } } + +func TestValidUTF8(t *testing.T) { + tests := []struct { + name string + plugin *Strings + metric []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "valid utf-8 keeps original string", + plugin: &Strings{ + ValidUTF8: []converter{ + { + Field: "message", + Replacement: "r", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "non-valid utf-8 modifies original string", + plugin: &Strings{ + ValidUTF8: []converter{ + { + Field: "message", + Replacement: "r", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "ho" + string([]byte{0xff}) + "wdy", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "horwdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "non-valid utf-8 and empty replacement removes invalid characters", + plugin: &Strings{ + ValidUTF8: []converter{ + { + Field: "message", + Replacement: "", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "ho" + string([]byte{0xff}) + "wdy", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.plugin.Apply(tt.metric...) 
+ testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} diff --git a/plugins/processors/tag_limit/tag_limit.go b/plugins/processors/tag_limit/tag_limit.go index 41353a8f863c4..1b48739a189f1 100644 --- a/plugins/processors/tag_limit/tag_limit.go +++ b/plugins/processors/tag_limit/tag_limit.go @@ -39,8 +39,8 @@ func (d *TagLimit) initOnce() error { } d.keepTags = make(map[string]string) // convert list of tags-to-keep to a map so we can do constant-time lookups - for _, tag_key := range d.Keep { - d.keepTags[tag_key] = "" + for _, tagKey := range d.Keep { + d.keepTags[tagKey] = "" } d.init = true return nil diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go index 9412d866b78e8..d9c361ed07296 100644 --- a/plugins/processors/tag_limit/tag_limit_test.go +++ b/plugins/processors/tag_limit/tag_limit_test.go @@ -16,7 +16,7 @@ func MustMetric(name string, tags map[string]string, fields map[string]interface if fields == nil { fields = map[string]interface{}{} } - m, _ := metric.New(name, tags, fields, metricTime) + m := metric.New(name, tags, fields, metricTime) return m } diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index 308d4f9f85f05..cfcb0b2176d38 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -1,16 +1,18 @@ # TopK Processor Plugin -The TopK processor plugin is a filter designed to get the top series over a period of time. It can be tweaked to do its top k computation over a period of time, so spikes can be smoothed out. +The TopK processor plugin is a filter designed to get the top series over a period of time. It can be tweaked to calculate the top metrics via different aggregation functions. This processor goes through these steps when processing a batch of metrics: - 1. Groups metrics in buckets using their tags and name as key - 2. Aggregates each of the selected fields for each bucket by the selected aggregation function (sum, mean, etc) - 3. Orders the buckets by one of the generated aggregations, returns all metrics in the top `K` buckets, then reorders the buckets by the next of the generated aggregations, returns all metrics in the top `K` buckets, etc, etc, etc, until it runs out of fields. + 1. Groups measurements in buckets based on their tags and name + 2. Every N seconds, for each bucket, for each selected field: aggregate all the measurements using a given aggregation function (min, sum, mean, etc) and the field. + 3. For each computed aggregation: order the buckets by the aggregation, then returns all measurements in the top `K` buckets -The plugin makes sure not to duplicate metrics - -Note that depending on the amount of metrics on each computed bucket, more than `K` metrics may be returned +Notes: + * The deduplicates metrics + * The name of the measurement is always used when grouping it + * Depending on the amount of metrics on each bucket, more than `K` series may be returned + * If a measurement does not have one of the selected fields, it is dropped from the aggregation ### Configuration: @@ -19,46 +21,40 @@ Note that depending on the amount of metrics on each computed bucket, more than ## How many seconds between aggregations # period = 10 - ## How many top metrics to return + ## How many top buckets to return # k = 10 - ## Over which tags should the aggregation be done. Globs can be specified, in - ## which case any tag matching the glob will aggregated over. 
If set to an - ## empty list is no aggregation over tags is done + ## Which tags to use when computing the buckets. Globs can be specified. + ## If set to an empty list, tags are not considered when creating the buckets # group_by = ['*'] - ## Over which fields are the top k are calculated + ## Over which fields is the aggregation done # fields = ["value"] - ## What aggregation to use. Options: sum, mean, min, max + ## What aggregation function to use. Options: sum, mean, min, max # aggregation = "mean" - ## Instead of the top k largest metrics, return the bottom k lowest metrics + ## Instead of the top k buckets, return the bottom k buckets # bottomk = false - ## The plugin assigns each metric a GroupBy tag generated from its name and - ## tags. If this setting is different than "" the plugin will add a - ## tag (which name will be the value of this setting) to each metric with - ## the value of the calculated GroupBy tag. Useful for debugging + ## This setting provides a way to know which metrics were grouped together. + ## Add a tag (whose name will be the value of this setting) to each metric. + ## Its value will be the tags used to pick the metric's bucket. # add_groupby_tag = "" - ## These settings provide a way to know the position of each metric in - ## the top k. The 'add_rank_field' setting allows to specify for which - ## fields the position is required. If the list is non empty, then a field - ## will be added to each and every metric for each string present in this - ## setting. This field will contain the ranking of the group that - ## the metric belonged to when aggregated over that field. + ## This setting provides a way to know the position of each metric's bucket in the top k. + ## If the list is non-empty, a field will be added to each and every metric + ## for each string present in this setting. This field will contain the ranking + ## of the bucket that the metric belonged to when aggregated over that field. ## The name of the field will be set to the name of the aggregation field, ## suffixed with the string '_topk_rank' # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_aggregate_field' setting allows to - ## specify for which fields the final aggregation value is required. If the - ## list is non empty, then a field will be added to each every metric for - ## each field present in this setting. This field will contain - ## the computed aggregation for the group that the metric belonged to when - ## aggregated over that field. + ## when aggregating metrics. If the list is non-empty, then a field will be + ## added to each and every metric for each field present in this setting. + ## This field will contain the computed aggregation for the bucket that the + ## metric belonged to when aggregated over that field.
## The name of the field will be set to the name of the aggregation field, ## suffixed with the string '_topk_aggregate' # add_aggregate_fields = [] diff --git a/plugins/processors/topk/test_sets.go b/plugins/processors/topk/test_sets.go index aea2c44c8052b..69f957817eca0 100644 --- a/plugins/processors/topk/test_sets.go +++ b/plugins/processors/topk/test_sets.go @@ -1,13 +1,14 @@ package topk import ( + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "time" ) ///// Test set 1 ///// -var metric11, _ = metric.New( +var metric11 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -17,7 +18,7 @@ var metric11, _ = metric.New( time.Now(), ) -var metric12, _ = metric.New( +var metric12 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -26,7 +27,7 @@ var metric12, _ = metric.New( time.Now(), ) -var metric13, _ = metric.New( +var metric13 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -36,7 +37,7 @@ var metric13, _ = metric.New( time.Now(), ) -var metric14, _ = metric.New( +var metric14 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -46,7 +47,7 @@ var metric14, _ = metric.New( time.Now(), ) -var metric15, _ = metric.New( +var metric15 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -60,7 +61,7 @@ var metric15, _ = metric.New( var MetricsSet1 = []telegraf.Metric{metric11, metric12, metric13, metric14, metric15} ///// Test set 2 ///// -var metric21, _ = metric.New( +var metric21 = metric.New( "metric1", map[string]string{ "id": "1", @@ -77,7 +78,7 @@ var metric21, _ = metric.New( time.Now(), ) -var metric22, _ = metric.New( +var metric22 = metric.New( "metric1", map[string]string{ "id": "2", @@ -93,7 +94,7 @@ var metric22, _ = metric.New( time.Now(), ) -var metric23, _ = metric.New( +var metric23 = metric.New( "metric1", map[string]string{ "id": "3", @@ -110,7 +111,7 @@ var metric23, _ = metric.New( time.Now(), ) -var metric24, _ = metric.New( +var metric24 = metric.New( "metric2", map[string]string{ "id": "4", @@ -126,7 +127,7 @@ var metric24, _ = metric.New( time.Now(), ) -var metric25, _ = metric.New( +var metric25 = metric.New( "metric2", map[string]string{ "id": "5", @@ -143,7 +144,7 @@ var metric25, _ = metric.New( time.Now(), ) -var metric26, _ = metric.New( +var metric26 = metric.New( "metric2", map[string]string{ "id": "6", diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 907ec1cc41fc6..b7c8f50d9dfde 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -2,28 +2,28 @@ package topk import ( "fmt" - "log" "math" "sort" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/processors" ) type TopK struct { - Period internal.Duration - K int - GroupBy []string `toml:"group_by"` - Fields []string - Aggregation string - Bottomk bool - AddGroupByTag string `toml:"add_groupby_tag"` - AddRankFields []string `toml:"add_rank_fields"` - AddAggregateFields []string `toml:"add_aggregate_fields"` + Period config.Duration `toml:"period"` + K int `toml:"k"` + GroupBy []string `toml:"group_by"` + Fields []string `toml:"fields"` + Aggregation string `toml:"aggregation"` + Bottomk bool `toml:"bottomk"` + 
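// Optional debugging outputs (see the README above): a tag carrying each metric's group key, plus per-field rank and aggregate fields. +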
AddGroupByTag string `toml:"add_groupby_tag"` + AddRankFields []string `toml:"add_rank_fields"` + AddAggregateFields []string `toml:"add_aggregate_fields"` + Log telegraf.Logger `toml:"-"` cache map[string][]telegraf.Metric tagsGlobs filter.Filter @@ -37,7 +37,7 @@ func New() *TopK { topk := TopK{} // Setup defaults - topk.Period = internal.Duration{Duration: time.Second * time.Duration(10)} + topk.Period = config.Duration(time.Second * time.Duration(10)) topk.K = 10 topk.Fields = []string{"value"} topk.Aggregation = "mean" @@ -110,11 +110,7 @@ func sortMetrics(metrics []MetricAggregation, field string, reverse bool) { less := func(i, j int) bool { iv := metrics[i].values[field] jv := metrics[j].values[field] - if iv < jv { - return true - } else { - return false - } + return iv < jv } if reverse { @@ -174,7 +170,7 @@ func (t *TopK) groupBy(m telegraf.Metric) { if err != nil { // If we could not generate the groupkey, fail hard // by dropping this and all subsequent metrics - log.Printf("E! [processors.topk]: could not generate group key: %v", err) + t.Log.Errorf("Could not generate group key: %v", err) return } @@ -235,7 +231,7 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric { // If enough time has passed elapsed := time.Since(t.lastAggregation) - if elapsed >= t.Period.Duration { + if elapsed >= time.Duration(t.Period) { return t.push() } @@ -269,7 +265,7 @@ func (t *TopK) push() []telegraf.Metric { if err != nil { // If we could not generate the aggregation // function, fail hard by dropping all metrics - log.Printf("E! [processors.topk]: %v", err) + t.Log.Errorf("%v", err) return []telegraf.Metric{} } for k, ms := range t.cache { @@ -277,12 +273,10 @@ func (t *TopK) push() []telegraf.Metric { } // The return value that will hold the returned metrics - var ret []telegraf.Metric = make([]telegraf.Metric, 0, 0) - + var ret = make([]telegraf.Metric, 0) // Get the top K metrics for each field and add them to the return value addedKeys := make(map[string]bool) for _, field := range t.Fields { - // Sort the aggregations sortMetrics(aggregations, field, t.Bottomk) @@ -318,11 +312,8 @@ func (t *TopK) push() []telegraf.Metric { result := make([]telegraf.Metric, 0, len(ret)) for _, m := range ret { - copy, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) - if err != nil { - continue - } - result = append(result, copy) + newMetric := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) + result = append(result, newMetric) } return result @@ -342,7 +333,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } val, ok := convert(fieldVal) if !ok { - log.Printf("Cannot convert value '%s' from metric '%s' with tags '%s'", + t.Log.Infof("Cannot convert value '%s' from metric '%s' with tags '%s'", m.Fields()[field], m.Name(), m.Tags()) continue } @@ -408,12 +399,12 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } val, ok := convert(fieldVal) if !ok { - log.Printf("Cannot convert value '%s' from metric '%s' with tags '%s'", + t.Log.Infof("Cannot convert value '%s' from metric '%s' with tags '%s'", m.Fields()[field], m.Name(), m.Tags()) continue } mean[field] += val - meanCounters[field] += 1 + meanCounters[field]++ } } // Divide by the number of recorded measurements collected for every field @@ -424,7 +415,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr continue } mean[k] = mean[k] / meanCounters[k] - noMeasurementsFound = noMeasurementsFound && false 
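+			// A mean was computed for at least one field, so measurements were found.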
+ noMeasurementsFound = false } if noMeasurementsFound { @@ -434,7 +425,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr }, nil default: - return nil, fmt.Errorf("Unknown aggregation function '%s'. No metrics will be processed", t.Aggregation) + return nil, fmt.Errorf("unknown aggregation function '%s', no metrics will be processed", t.Aggregation) } } diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 928111b29d7da..27e18e34a564e 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -5,10 +5,12 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) +var oneSecondDuration = config.Duration(time.Second) + // Key, value pair that represents a telegraf.Metric Field type field struct { key string @@ -117,14 +119,10 @@ func equalSets(l1 []telegraf.Metric, l2 []telegraf.Metric) bool { return subSet(l1, l2) && subSet(l2, l1) } -func createDuration(t int) internal.Duration { - return internal.Duration{Duration: time.Second * time.Duration(t)} -} - func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metric, testID string, t *testing.T) { // Sleep for `period`, otherwise the processor will only // cache the metrics, but it will not process them - time.Sleep(topk.Period.Duration) + time.Sleep(time.Duration(topk.Period)) // Run the processor ret := topk.Apply(metrics...) @@ -139,11 +137,9 @@ func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metr // Smoke tests func TestTopkAggregatorsSmokeTests(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.Fields = []string{"a"} topk.GroupBy = []string{"tag_name"} @@ -162,11 +158,9 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { // AddAggregateFields + Mean aggregator func TestTopkMeanAddAggregateFields(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.Aggregation = "mean" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -192,11 +186,9 @@ func TestTopkMeanAddAggregateFields(t *testing.T) { // AddAggregateFields + Sum aggregator func TestTopkSumAddAggregateFields(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.Aggregation = "sum" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -222,11 +214,9 @@ func TestTopkSumAddAggregateFields(t *testing.T) { // AddAggregateFields + Max aggregator func TestTopkMaxAddAggregateFields(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.Aggregation = "max" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -252,11 +242,9 @@ func TestTopkMaxAddAggregateFields(t *testing.T) { // AddAggregateFields + Min aggregator func TestTopkMinAddAggregateFields(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.Aggregation = "min" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -282,11 +270,9 @@ func 
TestTopkMinAddAggregateFields(t *testing.T) { // GroupBy func TestTopkGroupby1(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"value"} @@ -308,11 +294,9 @@ func TestTopkGroupby1(t *testing.T) { runAndCompare(&topk, input, answer, "GroupBy test 1", t) } func TestTopkGroupby2(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "mean" topk.AddAggregateFields = []string{"value"} @@ -338,11 +322,9 @@ func TestTopkGroupby2(t *testing.T) { runAndCompare(&topk, input, answer, "GroupBy test 2", t) } func TestTopkGroupby3(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 1 topk.Aggregation = "min" topk.AddAggregateFields = []string{"value"} @@ -365,11 +347,9 @@ func TestTopkGroupby3(t *testing.T) { // GroupBy + Fields func TestTopkGroupbyFields1(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 4 // This settings generate less than 3 groups topk.Aggregation = "mean" topk.AddAggregateFields = []string{"A"} @@ -393,11 +373,9 @@ func TestTopkGroupbyFields1(t *testing.T) { } func TestTopkGroupbyFields2(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 2 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"B", "C"} @@ -422,11 +400,9 @@ func TestTopkGroupbyFields2(t *testing.T) { // GroupBy metric name func TestTopkGroupbyMetricName1(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 1 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"value"} @@ -449,11 +425,9 @@ func TestTopkGroupbyMetricName1(t *testing.T) { } func TestTopkGroupbyMetricName2(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 2 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"A", "value"} @@ -478,11 +452,9 @@ func TestTopkGroupbyMetricName2(t *testing.T) { // BottomK func TestTopkBottomk(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" topk.GroupBy = []string{"tag1", "tag3"} @@ -505,11 +477,9 @@ func TestTopkBottomk(t *testing.T) { // GroupByKeyTag func TestTopkGroupByKeyTag(t *testing.T) { - // Build the processor - var topk TopK - topk = *New() - topk.Period = createDuration(1) + topk := *New() + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" topk.GroupBy = []string{"tag1", "tag3"} diff --git a/plugins/serializers/carbon2/README.md b/plugins/serializers/carbon2/README.md index e32a420aec0af..3ad54a1699d3a 100644 --- a/plugins/serializers/carbon2/README.md +++ b/plugins/serializers/carbon2/README.md @@ -21,6 +21,11 @@ The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 f ## * "metric_includes_field" ## * "" - defaults to 
"field_separate" # carbon2_format = "field_separate" + + ## Character used for replacing sanitized characters. By default ":" is used. + ## The following character set is being replaced with sanitize replace char: + ## !@#$%^&*()+`'\"[]{};<>,?/\\|= + # carbon2_sanitize_replace_char = ":" ``` Standard form: @@ -52,6 +57,17 @@ metric=name_field_2 host=foo 4 1234567890 metric=name_field_N host=foo 59 1234567890 ``` +### Metric name sanitization + +In order to sanitize the metric name one can specify `carbon2_sanitize_replace_char` +in order to replace the following characters in the metric name: + +``` +!@#$%^&*()+`'\"[]{};<>,?/\\|= +``` + +By default they will be replaced with `:`. + ## Metrics The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go index 1b05d4cb2d4c7..4eb5798d64a69 100644 --- a/plugins/serializers/carbon2/carbon2.go +++ b/plugins/serializers/carbon2/carbon2.go @@ -2,6 +2,7 @@ package carbon2 import ( "bytes" + "errors" "fmt" "strconv" "strings" @@ -23,11 +24,23 @@ var formats = map[format]struct{}{ Carbon2FormatMetricIncludesField: {}, } +const ( + DefaultSanitizeReplaceChar = ":" + sanitizedChars = "!@#$%^&*()+`'\"[]{};<>,?/\\|=" +) + type Serializer struct { - metricsFormat format + metricsFormat format + sanitizeReplacer *strings.Replacer } -func NewSerializer(metricsFormat string) (*Serializer, error) { +func NewSerializer(metricsFormat string, sanitizeReplaceChar string) (*Serializer, error) { + if sanitizeReplaceChar == "" { + sanitizeReplaceChar = DefaultSanitizeReplaceChar + } else if len(sanitizeReplaceChar) > 1 { + return nil, errors.New("sanitize replace char has to be a singular character") + } + var f = format(metricsFormat) if _, ok := formats[f]; !ok { @@ -40,7 +53,8 @@ func NewSerializer(metricsFormat string) (*Serializer, error) { } return &Serializer{ - metricsFormat: f, + metricsFormat: f, + sanitizeReplacer: createSanitizeReplacer(sanitizedChars, rune(sanitizeReplaceChar[0])), }, nil } @@ -65,15 +79,17 @@ func (s *Serializer) createObject(metric telegraf.Metric) []byte { continue } + name := s.sanitizeReplacer.Replace(metric.Name()) + switch metricsFormat { case Carbon2FormatFieldSeparate: m.WriteString(serializeMetricFieldSeparate( - metric.Name(), fieldName, + name, fieldName, )) case Carbon2FormatMetricIncludesField: m.WriteString(serializeMetricIncludeField( - metric.Name(), fieldName, + name, fieldName, )) } @@ -152,3 +168,13 @@ func bool2int(b bool) int { } return i } + +// createSanitizeReplacer creates string replacer replacing all provided +// characters with the replaceChar. +func createSanitizeReplacer(sanitizedChars string, replaceChar rune) *strings.Replacer { + sanitizeCharPairs := make([]string, 0, 2*len(sanitizedChars)) + for _, c := range sanitizedChars { + sanitizeCharPairs = append(sanitizeCharPairs, string(c), string(replaceChar)) + } + return strings.NewReplacer(sanitizeCharPairs...) 
+} diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go index 7ed98d6e6d6da..86f1b66db8932 100644 --- a/plugins/serializers/carbon2/carbon2_test.go +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -12,13 +12,6 @@ import ( "github.com/influxdata/telegraf/metric" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestSerializeMetricFloat(t *testing.T) { now := time.Now() tags := map[string]string{ @@ -27,8 +20,7 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -46,7 +38,7 @@ func TestSerializeMetricFloat(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -65,8 +57,7 @@ func TestSerializeMetricWithEmptyStringTag(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -84,7 +75,7 @@ func TestSerializeMetricWithEmptyStringTag(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -103,8 +94,7 @@ func TestSerializeWithSpaces(t *testing.T) { fields := map[string]interface{}{ "usage_idle 1": float64(91.5), } - m, err := metric.New("cpu metric", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu metric", tags, fields, now) testcases := []struct { format format @@ -122,7 +112,7 @@ func TestSerializeWithSpaces(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -141,8 +131,7 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -160,7 +149,7 @@ func TestSerializeMetricInt(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -179,8 +168,7 @@ func TestSerializeMetricString(t *testing.T) { fields := map[string]interface{}{ "usage_idle": "foobar", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -198,7 +186,7 @@ func TestSerializeMetricString(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -218,8 
+206,7 @@ func TestSerializeMetricBool(t *testing.T) { "java_lang_GarbageCollector_Valid": value, } - m, err := metric.New("cpu", tags, fields, tim) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, tim) return m } @@ -228,34 +215,34 @@ func TestSerializeMetricBool(t *testing.T) { testcases := []struct { metric telegraf.Metric - format string + format format expected string }{ { metric: requireMetric(t, now, false), - format: string(Carbon2FormatFieldSeparate), + format: Carbon2FormatFieldSeparate, expected: fmt.Sprintf("metric=cpu field=java_lang_GarbageCollector_Valid tag_name=tag_value 0 %d\n", now.Unix()), }, { metric: requireMetric(t, now, false), - format: string(Carbon2FormatMetricIncludesField), + format: Carbon2FormatMetricIncludesField, expected: fmt.Sprintf("metric=cpu_java_lang_GarbageCollector_Valid tag_name=tag_value 0 %d\n", now.Unix()), }, { metric: requireMetric(t, now, true), - format: string(Carbon2FormatFieldSeparate), + format: Carbon2FormatFieldSeparate, expected: fmt.Sprintf("metric=cpu field=java_lang_GarbageCollector_Valid tag_name=tag_value 1 %d\n", now.Unix()), }, { metric: requireMetric(t, now, true), - format: string(Carbon2FormatMetricIncludesField), + format: Carbon2FormatMetricIncludesField, expected: fmt.Sprintf("metric=cpu_java_lang_GarbageCollector_Valid tag_name=tag_value 1 %d\n", now.Unix()), }, } for _, tc := range testcases { - t.Run(tc.format, func(t *testing.T) { - s, err := NewSerializer(tc.format) + t.Run(string(tc.format), func(t *testing.T) { + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(tc.metric) @@ -267,15 +254,13 @@ func TestSerializeMetricBool(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, m} @@ -300,7 +285,7 @@ metric=cpu_value 42 0 for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.SerializeBatch(metrics) @@ -310,3 +295,112 @@ metric=cpu_value 42 0 }) } } + +func TestSerializeMetricIsProperlySanitized(t *testing.T) { + now := time.Now() + + testcases := []struct { + metricFunc func() telegraf.Metric + format format + expected string + replaceChar string + expectedErr bool + }{ + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu:1 field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu_1 field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: "_", + }, + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu:1:tmp:custom field=usage_idle 
91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu:1:tmp:custom:namespace field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatMetricIncludesField, + expected: fmt.Sprintf("metric=cpu:1:tmp:custom:namespace_usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatMetricIncludesField, + expected: fmt.Sprintf("metric=cpu_1_tmp_custom_namespace_usage_idle 91.5 %d\n", now.Unix()), + replaceChar: "_", + }, + { + metricFunc: func() telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatMetricIncludesField, + expectedErr: true, + replaceChar: "___", + }, + } + + for _, tc := range testcases { + t.Run(string(tc.format), func(t *testing.T) { + m := tc.metricFunc() + + s, err := NewSerializer(string(tc.format), tc.replaceChar) + if tc.expectedErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + + buf, err := s.Serialize(m) + require.NoError(t, err) + + assert.Equal(t, tc.expected, string(buf)) + }) + } +} diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md index f6fd0c2ccd9bd..f68765c54ae31 100644 --- a/plugins/serializers/graphite/README.md +++ b/plugins/serializers/graphite/README.md @@ -35,6 +35,8 @@ method is used, otherwise the [Template Pattern](templates) is used. ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. # graphite_tag_support = false + ## Enable Graphite tags to support the full list of allowed characters + # graphite_tag_new_sanitize = false ## Character for separating metric name and field for Graphite tags # graphite_separator = "." ``` @@ -64,4 +66,13 @@ cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 cpu_usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 ``` +The `graphite_tag_sanitize_mode` option defines how we should sanitize the tag names and values. Possible values are `strict`, or `compatible`, with the default being `strict`. + +When in `strict` mode Telegraf uses the same rules as metrics when not using tags. +When in `compatible` mode Telegraf allows more characters through, and is based on the Graphite specification: +>Tag names must have a length >= 1 and may contain any ascii characters except `;!^=`. Tag values must also have a length >= 1, they may contain any ascii characters except `;` and the first character must not be `~`. UTF-8 characters may work for names and values, but they are not well tested and it is not recommended to use non-ascii characters in metric names or tags. 
Metric names get indexed under the special tag name, if a metric name starts with one or multiple ~ they simply get removed from the derived tag value because the ~ character is not allowed to be in the first position of the tag value. If a metric name consists of no other characters than ~, then it is considered invalid and may get dropped. + + + + [templates]: /docs/TEMPLATE_PATTERN.md diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index e580409fe2b9f..c6130c7b7c4b4 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -13,11 +13,14 @@ import ( "github.com/influxdata/telegraf/filter" ) -const DEFAULT_TEMPLATE = "host.tags.measurement.field" +const DefaultTemplate = "host.tags.measurement.field" var ( - allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) - hyphenChars = strings.NewReplacer( + strictAllowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) + compatibleAllowedCharsName = regexp.MustCompile(`[^ "-:\<>-\]_a-~\p{L}]`) + compatibleAllowedCharsValue = regexp.MustCompile(`[^ -:<-~\p{L}]`) + compatibleLeadingTildeDrop = regexp.MustCompile(`^[~]*(.*)`) + hyphenChars = strings.NewReplacer( "/", "-", "@", "-", "*", "-", @@ -36,11 +39,12 @@ type GraphiteTemplate struct { } type GraphiteSerializer struct { - Prefix string - Template string - TagSupport bool - Separator string - Templates []*GraphiteTemplate + Prefix string + Template string + TagSupport bool + TagSanitizeMode string + Separator string + Templates []*GraphiteTemplate } func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { @@ -56,7 +60,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { if fieldValue == "" { continue } - bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName) + bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName, s.TagSanitizeMode) metricString := fmt.Sprintf("%s %s %d\n", // insert "field" section of template bucket, @@ -87,7 +91,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { } metricString := fmt.Sprintf("%s %s %d\n", // insert "field" section of template - sanitize(InsertField(bucket, fieldName)), + strictSanitize(InsertField(bucket, fieldName)), fieldValue, timestamp) point := []byte(metricString) @@ -119,9 +123,8 @@ func formatValue(value interface{}) string { case bool: if v { return "1" - } else { - return "0" } + return "0" case uint64: return strconv.FormatUint(v, 10) case int64: @@ -142,7 +145,7 @@ func formatValue(value interface{}) string { // SerializeBucketName will take the given measurement name and tags and // produce a graphite bucket. It will use the GraphiteSerializer.Template -// to generate this, or DEFAULT_TEMPLATE. +// to generate this, or DefaultTemplate. // // NOTE: SerializeBucketName replaces the "field" portion of the template with // FIELDNAME. It is up to the user to replace this. 
This is so that @@ -155,7 +158,7 @@ func SerializeBucketName( prefix string, ) string { if template == "" { - template = DEFAULT_TEMPLATE + template = DefaultTemplate } tagsCopy := make(map[string]string) for k, v := range tags { @@ -214,11 +217,11 @@ func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, err if len(parts) == 1 { if parts[0] == "" { return nil, "", fmt.Errorf("missing template at position: %d", i) - } else { - // Override default template - defaultTemplate = t - continue } + + // Override default template + defaultTemplate = t + continue } if len(parts) > 2 { @@ -249,6 +252,7 @@ func SerializeBucketNameWithTags( prefix string, separator string, field string, + tagSanitizeMode string, ) string { var out string var tagsCopy []string @@ -256,7 +260,11 @@ func SerializeBucketNameWithTags( if k == "name" { k = "_name" } - tagsCopy = append(tagsCopy, sanitize(k+"="+v)) + if tagSanitizeMode == "compatible" { + tagsCopy = append(tagsCopy, compatibleSanitize(k, v)) + } else { + tagsCopy = append(tagsCopy, strictSanitize(k+"="+v)) + } } sort.Strings(tagsCopy) @@ -270,7 +278,7 @@ func SerializeBucketNameWithTags( out += separator + field } - out = sanitize(out) + out = strictSanitize(out) if len(tagsCopy) > 0 { out += ";" + strings.Join(tagsCopy, ";") @@ -297,23 +305,30 @@ func buildTags(tags map[string]string) string { } sort.Strings(keys) - var tag_str string + var tagStr string for i, k := range keys { - tag_value := strings.Replace(tags[k], ".", "_", -1) + tagValue := strings.Replace(tags[k], ".", "_", -1) if i == 0 { - tag_str += tag_value + tagStr += tagValue } else { - tag_str += "." + tag_value + tagStr += "." + tagValue } } - return tag_str + return tagStr } -func sanitize(value string) string { +func strictSanitize(value string) string { // Apply special hyphenation rules to preserve backwards compatibility value = hyphenChars.Replace(value) // Apply rule to drop some chars to preserve backwards compatibility value = dropChars.Replace(value) // Replace any remaining illegal chars - return allowedChars.ReplaceAllLiteralString(value, "_") + return strictAllowedChars.ReplaceAllLiteralString(value, "_") +} + +func compatibleSanitize(name string, value string) string { + name = compatibleAllowedCharsName.ReplaceAllLiteralString(name, "_") + value = compatibleAllowedCharsValue.ReplaceAllLiteralString(value, "_") + value = compatibleLeadingTildeDrop.FindStringSubmatch(value)[1] + return name + "=" + value } diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index b6fcad696dc2e..f2fd3b7f150a9 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -32,19 +32,19 @@ const ( ) func TestGraphiteTags(t *testing.T) { - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "afoo": "first", "bfoo": "second"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "mymeasurement", map[string]string{"afoo": "first", "bfoo": "second"}, map[string]interface{}{"value": float64(3.14)}, @@ -70,13 +70,11 @@ func TestSerializeMetricNoHost(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, 
err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), @@ -97,8 +95,7 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -106,7 +103,6 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.usage_idle;cpu=cpu0;datacenter=us-west-2 91.5 %d", now.Unix()), @@ -128,13 +124,11 @@ func TestSerializeMetricHost(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), @@ -156,9 +150,8 @@ func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m1, err := metric.New("cpu", tags, fields, now) - m2, err := metric.New("new_cpu", tags, fields, now) - assert.NoError(t, err) + m1 := metric.New("cpu", tags, fields, now) + m2 := metric.New("new_cpu", tags, fields, now) templates, defaultTemplate, err := InitGraphiteTemplates([]string{ "cp* tags.measurement.host.field", @@ -201,9 +194,8 @@ func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m1, err := metric.New("cpu", tags, fields, now) - m2, err := metric.New("new_cpu", tags, fields, now) - assert.NoError(t, err) + m1 := metric.New("cpu", tags, fields, now) + m2 := metric.New("new_cpu", tags, fields, now) templates, defaultTemplate, err := InitGraphiteTemplates([]string{ "cp* tags.measurement.host.field", @@ -247,8 +239,7 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -256,7 +247,6 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.usage_idle;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -278,13 +268,11 @@ func TestSerializeValueField(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), @@ -302,8 +290,7 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err 
:= metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -311,7 +298,6 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -330,15 +316,13 @@ func TestSerializeValueField2(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.field.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), @@ -356,15 +340,13 @@ func TestSerializeValueString(t *testing.T) { fields := map[string]interface{}{ "value": "asdasd", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.field.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) assert.Equal(t, "", mS[0]) } @@ -378,8 +360,7 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { fields := map[string]interface{}{ "value": "asdasd", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -387,7 +368,6 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) assert.Equal(t, "", mS[0]) } @@ -402,15 +382,13 @@ func TestSerializeValueBoolean(t *testing.T) { "enabled": true, "disabled": false, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.field.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.enabled.cpu0.us-west-2.cpu 1 %d", now.Unix()), @@ -432,8 +410,7 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { "enabled": true, "disabled": false, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -441,7 +418,6 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.enabled;cpu=cpu0;datacenter=us-west-2;host=localhost 1 %d", now.Unix()), @@ -458,8 +434,7 @@ func TestSerializeValueUnsigned(t *testing.T) { fields := map[string]interface{}{ "free": uint64(42), } - m, err := metric.New("mem", tags, fields, now) - require.NoError(t, err) + m := metric.New("mem", tags, fields, now) s := GraphiteSerializer{} buf, err := s.Serialize(m) @@ -479,15 +454,13 @@ func TestSerializeFieldWithSpaces(t *testing.T) { fields := map[string]interface{}{ `field\ with\ spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := 
GraphiteSerializer{ Template: "host.tags.measurement.field", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), @@ -505,8 +478,7 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { fields := map[string]interface{}{ `field\ with\ spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -514,7 +486,6 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -533,15 +504,13 @@ func TestSerializeTagWithSpaces(t *testing.T) { fields := map[string]interface{}{ `field_with_spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.tags.measurement.field", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu_0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), @@ -559,8 +528,7 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { fields := map[string]interface{}{ `field_with_spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -568,7 +536,6 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu_0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -576,6 +543,32 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { assert.Equal(t, expS, mS) } +func TestSerializeTagWithSpacesWithTagSupportCompatibleSanitize(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": `cpu\ 0`, + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + `field_with_spaces`: float64(91.5), + } + m := metric.New("cpu", tags, fields, now) + + s := GraphiteSerializer{ + TagSupport: true, + TagSanitizeMode: "compatible", + Separator: ".", + } + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + + expS := []string{ + fmt.Sprintf("cpu.field_with_spaces;cpu=cpu\\ 0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), + } + assert.Equal(t, expS, mS) +} + // test that a field named "value" gets ignored at beginning of template. 
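// (the serializer drops a field literally named "value" when building the bucket, so the expected output below has no field segment)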
func TestSerializeValueField3(t *testing.T) { now := time.Now() @@ -587,15 +580,13 @@ func TestSerializeValueField3(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "field.host.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), @@ -614,15 +605,13 @@ func TestSerializeValueField5(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: template5, } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.us-west-2.cpu0.cpu 91.5 %d", now.Unix()), @@ -641,13 +630,11 @@ func TestSerializeMetricPrefix(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{Prefix: "prefix"} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("prefix.localhost.cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), @@ -669,8 +656,7 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Prefix: "prefix", @@ -679,7 +665,6 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("prefix.cpu.usage_idle;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -699,8 +684,7 @@ func TestSerializeBucketNameNoHost(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), "", "") @@ -713,8 +697,7 @@ func TestSerializeBucketNameHost(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), "", "") @@ -727,8 +710,7 @@ func TestSerializeBucketNamePrefix(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), "", "prefix") @@ -741,8 +723,7 @@ func TestTemplate1(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template1, "") @@ -755,8 +736,7 @@ func TestTemplate2(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - 
m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template2, "") @@ -769,8 +749,7 @@ func TestTemplate3(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template3, "") @@ -783,8 +762,7 @@ func TestTemplate4(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template4, "") @@ -797,8 +775,7 @@ func TestTemplate6(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template6, "") @@ -809,11 +786,11 @@ func TestTemplate6(t *testing.T) { func TestClean(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -890,8 +867,7 @@ func TestClean(t *testing.T) { s := GraphiteSerializer{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.Serialize(m) require.Equal(t, tt.expected, string(actual)) }) @@ -901,11 +877,11 @@ func TestClean(t *testing.T) { func TestCleanWithTagsSupport(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -985,8 +961,102 @@ func TestCleanWithTagsSupport(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) + actual, _ := s.Serialize(m) + require.Equal(t, tt.expected, string(actual)) + }) + } +} + +func TestCleanWithTagsSupportCompatibleSanitize(t *testing.T) { + now := time.Unix(1234567890, 0) + tests := []struct { + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string + }{ + { + "Base metric", + "cpu", + map[string]string{"host": "localhost"}, + map[string]interface{}{"usage_busy": float64(8.5)}, + "cpu.usage_busy;host=localhost 8.5 1234567890\n", + }, + { + "Dot and whitespace in tags", + "cpu", + map[string]string{"host": "localhost", "label.dot and space": "value with.dot"}, + map[string]interface{}{"usage_busy": float64(8.5)}, + "cpu.usage_busy;host=localhost;label.dot and space=value with.dot 8.5 1234567890\n", + }, + { + "Field with space", + "system", + map[string]string{"host": "localhost"}, + map[string]interface{}{"uptime_format": "20 days, 23:26"}, + "", // yes nothing. 
graphite doesn't serialize string fields + }, + { + "Allowed punct", + "cpu", + map[string]string{"host": "localhost", "tag": "-_:=!^~"}, + map[string]interface{}{"usage_busy": float64(10)}, + "cpu.usage_busy;host=localhost;tag=-_:=!^~ 10 1234567890\n", + }, + { + "Special characters preserved", + "cpu", + map[string]string{"host": "localhost", "tag": "/@*"}, + map[string]interface{}{"usage_busy": float64(10)}, + "cpu.usage_busy;host=localhost;tag=/@* 10 1234567890\n", + }, + { + "Special characters preserved 2", + "cpu", + map[string]string{"host": "localhost", "tag": `\no change to slash`}, + map[string]interface{}{"usage_busy": float64(10)}, + "cpu.usage_busy;host=localhost;tag=\\no change to slash 10 1234567890\n", + }, + { + "Empty tag & value field", + "cpu", + map[string]string{"host": "localhost"}, + map[string]interface{}{"value": float64(10)}, + "cpu;host=localhost 10 1234567890\n", + }, + { + "Unicode Letters allowed", + "cpu", + map[string]string{"host": "localhost", "tag": "μnicodε_letters"}, + map[string]interface{}{"value": float64(10)}, + "cpu;host=localhost;tag=μnicodε_letters 10 1234567890\n", + }, + { + "Other Unicode not allowed", + "cpu", + map[string]string{"host": "localhost", "tag": "“☢”"}, + map[string]interface{}{"value": float64(10)}, + "cpu;host=localhost;tag=___ 10 1234567890\n", + }, + { + "Newline in tags", + "cpu", + map[string]string{"host": "localhost", "label": "some\nthing\nwith\nnewline"}, + map[string]interface{}{"usage_busy": float64(8.5)}, + "cpu.usage_busy;host=localhost;label=some_thing_with_newline 8.5 1234567890\n", + }, + } + + s := GraphiteSerializer{ + TagSupport: true, + TagSanitizeMode: "compatible", + Separator: ".", + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.Serialize(m) require.Equal(t, tt.expected, string(actual)) }) @@ -996,11 +1066,11 @@ func TestCleanWithTagsSupport(t *testing.T) { func TestSerializeBatch(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -1014,8 +1084,7 @@ func TestSerializeBatch(t *testing.T) { s := GraphiteSerializer{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.SerializeBatch([]telegraf.Metric{m, m}) require.Equal(t, tt.expected, string(actual)) }) @@ -1025,11 +1094,11 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -1046,8 +1115,7 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.SerializeBatch([]telegraf.Metric{m, m}) require.Equal(t, tt.expected, string(actual)) }) diff --git a/plugins/serializers/influx/escape.go
b/plugins/serializers/influx/escape.go index 9320eb7fa5057..0f9fb5edf2add 100644 --- a/plugins/serializers/influx/escape.go +++ b/plugins/serializers/influx/escape.go @@ -38,25 +38,22 @@ var ( func escape(s string) string { if strings.ContainsAny(s, escapes) { return escaper.Replace(s) - } else { - return s } + return s } // Escape a measurement name func nameEscape(s string) string { if strings.ContainsAny(s, nameEscapes) { return nameEscaper.Replace(s) - } else { - return s } + return s } // Escape a string field func stringFieldEscape(s string) string { if strings.ContainsAny(s, stringFieldEscapes) { return stringFieldEscaper.Replace(s) - } else { - return s } + return s } diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index aa76b8accb8e1..978614376dabb 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -237,7 +237,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { // Additional length needed for field separator `,` if !firstField { - bytesNeeded += 1 + bytesNeeded++ } if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes { @@ -302,13 +302,11 @@ func (s *Serializer) appendFieldValue(buf []byte, value interface{}) ([]byte, er case uint64: if s.fieldTypeSupport&UintSupport != 0 { return appendUintField(buf, v), nil - } else { - if v <= uint64(MaxInt64) { - return appendIntField(buf, int64(v)), nil - } else { - return appendIntField(buf, int64(MaxInt64)), nil - } } + if v <= uint64(MaxInt64) { + return appendIntField(buf, int64(v)), nil + } + return appendIntField(buf, MaxInt64), nil case int64: return appendIntField(buf, v), nil case float64: diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index a86215d94bf4b..f80718b3aa8e6 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -10,13 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - var tests = []struct { name string maxBytes int @@ -27,506 +20,446 @@ var tests = []struct { }{ { name: "minimal", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42 0\n"), }, { name: "multiple tags", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "host": "localhost", - "cpu": "CPU0", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "host": "localhost", + "cpu": "CPU0", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), output: []byte("cpu,cpu=CPU0,host=localhost value=42 0\n"), }, { name: "multiple fields", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "x": 42.0, - "y": 42.0, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "x": 42.0, + "y": 42.0, + }, + time.Unix(0, 0), ), output: []byte("cpu x=42,y=42 0\n"), }, { name: "float NaN", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "x": math.NaN(), - "y": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + 
map[string]interface{}{ + "x": math.NaN(), + "y": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu y=42i 0\n"), }, { name: "float NaN only", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": math.NaN(), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": math.NaN(), + }, + time.Unix(0, 0), ), errReason: NoFields, }, { name: "float Inf", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": math.Inf(1), - "y": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": math.Inf(1), + "y": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu y=42i 0\n"), }, { name: "integer field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "integer field 64-bit", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": int64(123456789012345), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": int64(123456789012345), + }, + time.Unix(0, 0), ), output: []byte("cpu value=123456789012345i 0\n"), }, { name: "uint field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(42), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(42), + }, + time.Unix(0, 0), ), output: []byte("cpu value=42u 0\n"), typeSupport: UintSupport, }, { name: "uint field max value", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(18446744073709551615), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(18446744073709551615), + }, + time.Unix(0, 0), ), output: []byte("cpu value=18446744073709551615u 0\n"), typeSupport: UintSupport, }, { name: "uint field no uint support", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(42), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(42), + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "uint field no uint support overflow", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(18446744073709551615), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(18446744073709551615), + }, + time.Unix(0, 0), ), output: []byte("cpu value=9223372036854775807i 0\n"), }, { name: "bool field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": true, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": true, + }, + time.Unix(0, 0), ), output: []byte("cpu value=true 0\n"), }, { name: "string field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "howdy", - }, - time.Unix(0, 0), - ), + 
input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "howdy", + }, + time.Unix(0, 0), ), output: []byte("cpu value=\"howdy\" 0\n"), }, { name: "timestamp", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu value=42 1519194109000000042\n"), }, { name: "split fields exact", maxBytes: 33, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"), }, { name: "split fields extra", maxBytes: 34, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"), }, { name: "split_fields_overflow", maxBytes: 43, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - "ghi": 789, - "jkl": 123, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + "ghi": 789, + "jkl": 123, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu abc=123i,def=456i 1519194109000000042\ncpu ghi=789i,jkl=123i 1519194109000000042\n"), }, { name: "name newline", - input: MustMetric( - metric.New( - "c\npu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "c\npu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("c\\npu value=42i 0\n"), }, { name: "tag newline", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "host": "x\ny", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "host": "x\ny", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu,host=x\\ny value=42i 0\n"), }, { name: "empty tag value is removed", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "host": "", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "host": "", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "empty tag key is removed", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "": "example.org", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "": "example.org", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "tag value ends with backslash is trimmed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - "path": `C:\`, - }, - map[string]interface{}{ - 
"value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + "path": `C:\`, + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk,path=C: value=42i 0\n"), }, { name: "tag key ends with backslash is trimmed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - `path\`: "/", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + `path\`: "/", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk,path=/ value=42i 0\n"), }, { name: "tag key backslash is trimmed and removed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - `\`: "example.org", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + `\`: "example.org", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk value=42i 0\n"), }, { name: "tag value backslash is trimmed and removed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - "host": `\`, - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + "host": `\`, + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk value=42i 0\n"), }, { name: "string newline", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "x\ny", - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "x\ny", + }, + time.Unix(0, 0), ), output: []byte("cpu value=\"x\ny\" 0\n"), }, { name: "need more space", maxBytes: 32, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + }, + time.Unix(1519194109, 42), ), output: nil, errReason: NeedMoreSpace, }, { name: "no fields", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), ), errReason: NoFields, }, { name: "procstat", - input: MustMetric( - metric.New( - "procstat", - map[string]string{ - "exe": "bash", - "process_name": "bash", - }, - map[string]interface{}{ - "cpu_time": 0, - "cpu_time_guest": float64(0), - "cpu_time_guest_nice": float64(0), - "cpu_time_idle": float64(0), - "cpu_time_iowait": float64(0), - "cpu_time_irq": float64(0), - "cpu_time_nice": float64(0), - "cpu_time_soft_irq": float64(0), - "cpu_time_steal": float64(0), - "cpu_time_system": float64(0), - "cpu_time_user": float64(0.02), - "cpu_usage": float64(0), - "involuntary_context_switches": 2, - "memory_data": 1576960, - "memory_locked": 0, - "memory_rss": 5103616, - "memory_stack": 139264, - "memory_swap": 0, - "memory_vms": 21659648, - "nice_priority": 20, - "num_fds": 4, - "num_threads": 1, - "pid": 29417, - "read_bytes": 0, - "read_count": 259, - "realtime_priority": 0, - "rlimit_cpu_time_hard": 2147483647, - "rlimit_cpu_time_soft": 2147483647, - "rlimit_file_locks_hard": 2147483647, - "rlimit_file_locks_soft": 2147483647, - "rlimit_memory_data_hard": 2147483647, - "rlimit_memory_data_soft": 2147483647, - "rlimit_memory_locked_hard": 65536, - 
"rlimit_memory_locked_soft": 65536, - "rlimit_memory_rss_hard": 2147483647, - "rlimit_memory_rss_soft": 2147483647, - "rlimit_memory_stack_hard": 2147483647, - "rlimit_memory_stack_soft": 8388608, - "rlimit_memory_vms_hard": 2147483647, - "rlimit_memory_vms_soft": 2147483647, - "rlimit_nice_priority_hard": 0, - "rlimit_nice_priority_soft": 0, - "rlimit_num_fds_hard": 4096, - "rlimit_num_fds_soft": 1024, - "rlimit_realtime_priority_hard": 0, - "rlimit_realtime_priority_soft": 0, - "rlimit_signals_pending_hard": 78994, - "rlimit_signals_pending_soft": 78994, - "signals_pending": 0, - "voluntary_context_switches": 42, - "write_bytes": 106496, - "write_count": 35, - }, - time.Unix(0, 1517620624000000000), - ), + input: metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), ), output: []byte("procstat,exe=bash,process_name=bash 
cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"), }, @@ -565,15 +498,13 @@ func BenchmarkSerializer(b *testing.B) { } func TestSerialize_SerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, m} diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go index 55b6c2b4130ec..4e0b60343172b 100644 --- a/plugins/serializers/influx/reader.go +++ b/plugins/serializers/influx/reader.go @@ -50,7 +50,7 @@ func (r *reader) Read(p []byte) (int, error) { for _, metric := range r.metrics[r.offset:] { _, err := r.serializer.Write(r.buf, metric) - r.offset += 1 + r.offset++ if err != nil { r.buf.Reset() if _, ok := err.(*MetricError); ok { diff --git a/plugins/serializers/influx/reader_test.go b/plugins/serializers/influx/reader_test.go index 7aaf3fccf41e9..217660e43f4bd 100644 --- a/plugins/serializers/influx/reader_test.go +++ b/plugins/serializers/influx/reader_test.go @@ -24,15 +24,13 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 20, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -42,25 +40,21 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 20, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\ncpu value=42 0\n"), @@ -70,15 +64,13 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, 
bufferSize: 15, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -88,25 +80,21 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 15, input: []telegraf.Metric{ - MustMetric( - metric.New( - "", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -116,25 +104,21 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 15, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), - MustMetric( - metric.New( - "", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -169,15 +153,13 @@ func TestReader(t *testing.T) { } func TestZeroLengthBufferNoError(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) serializer := NewSerializer() serializer.SetFieldSortOrder(SortFields) @@ -191,79 +173,76 @@ func TestZeroLengthBufferNoError(t *testing.T) { } func BenchmarkReader(b *testing.B) { - m := MustMetric( - metric.New( - "procstat", - map[string]string{ - "exe": "bash", - "process_name": "bash", - }, - map[string]interface{}{ - "cpu_time": 0, - "cpu_time_guest": float64(0), - "cpu_time_guest_nice": float64(0), - "cpu_time_idle": float64(0), - "cpu_time_iowait": float64(0), - "cpu_time_irq": float64(0), - "cpu_time_nice": float64(0), - "cpu_time_soft_irq": float64(0), - "cpu_time_steal": float64(0), - "cpu_time_system": float64(0), - "cpu_time_user": float64(0.02), - "cpu_usage": float64(0), - "involuntary_context_switches": 2, - "memory_data": 1576960, - "memory_locked": 0, - "memory_rss": 5103616, - "memory_stack": 139264, - "memory_swap": 0, - "memory_vms": 21659648, - "nice_priority": 20, - "num_fds": 4, - "num_threads": 1, - "pid": 29417, - "read_bytes": 0, - "read_count": 259, - "realtime_priority": 0, - "rlimit_cpu_time_hard": 2147483647, - "rlimit_cpu_time_soft": 2147483647, - "rlimit_file_locks_hard": 2147483647, - "rlimit_file_locks_soft": 2147483647, - "rlimit_memory_data_hard": 2147483647, - "rlimit_memory_data_soft": 2147483647, - "rlimit_memory_locked_hard": 65536, - "rlimit_memory_locked_soft": 65536, - "rlimit_memory_rss_hard": 2147483647, - "rlimit_memory_rss_soft": 2147483647, - "rlimit_memory_stack_hard": 2147483647, - "rlimit_memory_stack_soft": 8388608, - "rlimit_memory_vms_hard": 2147483647, - "rlimit_memory_vms_soft": 
2147483647, - "rlimit_nice_priority_hard": 0, - "rlimit_nice_priority_soft": 0, - "rlimit_num_fds_hard": 4096, - "rlimit_num_fds_soft": 1024, - "rlimit_realtime_priority_hard": 0, - "rlimit_realtime_priority_soft": 0, - "rlimit_signals_pending_hard": 78994, - "rlimit_signals_pending_soft": 78994, - "signals_pending": 0, - "voluntary_context_switches": 42, - "write_bytes": 106496, - "write_count": 35, - }, - time.Unix(0, 1517620624000000000), - ), + m := metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), ) - - metrics := make([]telegraf.Metric, 1000, 1000) + metrics := make([]telegraf.Metric, 1000) for i := range metrics { metrics[i] = m } b.ResetTimer() for i := 0; i < b.N; i++ { - readbuf := make([]byte, 4096, 4096) + readbuf := make([]byte, 4096) serializer := NewSerializer() reader := NewReader(metrics, serializer) for { diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 9ea304c88eedb..74d7f94166621 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -28,12 +28,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":91.5},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") assert.Equal(t, string(expS), string(buf)) @@ -78,15 +77,13 @@ func TestSerialize_TimestampUnits(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, 
- map[string]interface{}{ - "value": 42.0, - }, - time.Unix(1525478795, 123456789), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1525478795, 123456789), ) s, _ := NewSerializer(tt.timestampUnits) actual, err := s.Serialize(m) @@ -104,12 +101,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") @@ -124,12 +120,11 @@ func TestSerializeMetricString(t *testing.T) { fields := map[string]interface{}{ "usage_idle": "foobar", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":"foobar"},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") @@ -145,12 +140,11 @@ func TestSerializeMultiFields(t *testing.T) { "usage_idle": int64(90), "usage_total": 8559615, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90,"usage_total":8559615},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") @@ -165,8 +159,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { fields := map[string]interface{}{ "U,age=Idle": int64(90), } - m, err := metric.New("My CPU", tags, fields, now) - assert.NoError(t, err) + m := metric.New("My CPU", tags, fields, now) s, _ := NewSerializer(0) buf, err := s.Serialize(m) @@ -177,15 +170,13 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, m} diff --git a/plugins/serializers/msgpack/README.md b/plugins/serializers/msgpack/README.md new file mode 100644 index 0000000000000..5607cc64c05bc --- /dev/null +++ b/plugins/serializers/msgpack/README.md @@ -0,0 +1,45 @@ +# MessagePack: + +MessagePack is an efficient binary serialization format. It lets you exchange data among multiple languages like JSON. + +https://msgpack.org + +### Format Definitions: + +The output of this format is the MessagePack binary representation of metrics, with a structure identical to the JSON below. + +``` +{ + "name":"cpu", + "time": , // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type + "tags":{ + "tag_1":"host01", + ... + }, + "fields":{ + "field_1":30, + "field_2":true, + "field_3":"field_value", + "field_4":30.1 + ... + } +} +``` + +MessagePack has its own timestamp representation. You can find additional information in the [MessagePack specification](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type).
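Because each serialized metric is a self-contained MessagePack object, a batch is simply the concatenation of such objects, and a consumer can decode them in a loop. A minimal sketch of such a consumer, using the `Metric` type and its msgp-generated `UnmarshalMsg` from this patch (the standalone `main` package and the input path are illustrative, and the Telegraf module is assumed to be on the import path):

```go
package main

import (
	"fmt"
	"os"

	"github.com/influxdata/telegraf/plugins/serializers/msgpack"
)

func main() {
	// Read a stream produced by the msgpack serializer, e.g. the
	// /tmp/metrics.out file written by the outputs.file example below.
	buf, err := os.ReadFile("/tmp/metrics.out")
	if err != nil {
		panic(err)
	}
	for len(buf) > 0 {
		m := &msgpack.Metric{}
		// UnmarshalMsg decodes one metric and returns the remaining
		// bytes, so concatenated metrics can be consumed in a loop.
		buf, err = m.UnmarshalMsg(buf)
		if err != nil {
			panic(err)
		}
		fmt.Println(m.Name, m.Tags, m.Fields)
	}
}
```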
+ +### MessagePack Configuration: + +There are no additional configuration options for the MessagePack format. + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "msgpack" +``` \ No newline at end of file diff --git a/plugins/serializers/msgpack/metric.go b/plugins/serializers/msgpack/metric.go new file mode 100644 index 0000000000000..6b8a00878b6a8 --- /dev/null +++ b/plugins/serializers/msgpack/metric.go @@ -0,0 +1,104 @@ +package msgpack + +import ( + "encoding/binary" + "time" + + "github.com/tinylib/msgp/msgp" +) + +//go:generate msgp + +// Metric is the structure that defines the MessagePack message format; +// it is consumed by the msgp code generator +type Metric struct { + Name string `msg:"name"` + Time MessagePackTime `msg:"time,extension"` + Tags map[string]string `msg:"tags"` + Fields map[string]interface{} `msg:"fields"` +} + +// MessagePackTime implements the official timestamp extension type +// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type +// +// tinylib/msgp uses its own custom extension type, and the official extension +// is not available. (https://github.com/tinylib/msgp/issues/214) +type MessagePackTime struct { + time time.Time +} + +func init() { + msgp.RegisterExtension(-1, func() msgp.Extension { return new(MessagePackTime) }) } + +// ExtensionType implements the Extension interface +func (*MessagePackTime) ExtensionType() int8 { + return -1 +} + +// Len implements the Extension interface +// The timestamp extension uses variable-length encoding depending on the input +// +// 32bits: [1970-01-01 00:00:00 UTC, 2106-02-07 06:28:16 UTC) range, if the nanoseconds part is 0 +// 64bits: [1970-01-01 00:00:00.000000000 UTC, 2514-05-30 01:53:04.000000000 UTC) range. +// 96bits: [-584554047284-02-23 16:59:44 UTC, 584554051223-11-09 07:00:16.000000000 UTC) range.
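// For example (illustrative values, following the ranges above):
//
//	time.Unix(0, 0)     -> 4-byte encoding (seconds fit in 32 bits, nanoseconds are zero)
//	time.Unix(0, 1)     -> 8-byte encoding (non-zero nanoseconds)
//	time.Unix(1<<32, 0) -> 8-byte encoding (seconds need more than 32 bits)
//	time.Unix(1<<34, 0) -> 12-byte encoding (seconds outside the 34-bit range)
//	time.Time{}         -> 12-byte encoding (negative Unix seconds)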
+func (t *MessagePackTime) Len() int { + sec := t.time.Unix() + nsec := t.time.Nanosecond() + + if sec < 0 || sec >= (1<<34) { // 96 bits encoding + return 12 + } + if sec >= (1<<32) || nsec != 0 { + return 8 + } + return 4 +} + +// MarshalBinaryTo implements the Extension interface +func (t *MessagePackTime) MarshalBinaryTo(buf []byte) error { + len := t.Len() + + if len == 4 { + sec := t.time.Unix() + binary.BigEndian.PutUint32(buf, uint32(sec)) + } else if len == 8 { + sec := t.time.Unix() + nsec := t.time.Nanosecond() + + data := uint64(nsec)<<34 | (uint64(sec) & 0x03_ffff_ffff) + binary.BigEndian.PutUint64(buf, data) + } else if len == 12 { + sec := t.time.Unix() + nsec := t.time.Nanosecond() + + binary.BigEndian.PutUint32(buf, uint32(nsec)) + binary.BigEndian.PutUint64(buf[4:], uint64(sec)) + } + + return nil +} + +// UnmarshalBinary implements the Extension interface +func (t *MessagePackTime) UnmarshalBinary(buf []byte) error { + len := len(buf) + + if len == 4 { + sec := binary.BigEndian.Uint32(buf) + t.time = time.Unix(int64(sec), 0) + } else if len == 8 { + data := binary.BigEndian.Uint64(buf) + + nsec := (data & 0xfffffffc_00000000) >> 34 + sec := (data & 0x00000003_ffffffff) + + t.time = time.Unix(int64(sec), int64(nsec)) + } else if len == 12 { + nsec := binary.BigEndian.Uint32(buf) + sec := binary.BigEndian.Uint64(buf[4:]) + + t.time = time.Unix(int64(sec), int64(nsec)) + } + + return nil +} diff --git a/plugins/serializers/msgpack/metric_gen.go b/plugins/serializers/msgpack/metric_gen.go new file mode 100644 index 0000000000000..f02b0aba28503 --- /dev/null +++ b/plugins/serializers/msgpack/metric_gen.go @@ -0,0 +1,417 @@ +package msgpack + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *MessagePackTime) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z MessagePackTime) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 0 + err = en.Append(0x80) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z MessagePackTime) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *MessagePackTime) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z MessagePackTime) Msgsize() (s int) { + s = 1 + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Metric) DecodeMsg(dc 
*msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "time": + err = dc.ReadExtension(&z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + case "tags": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0002) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + z.Tags[za0001] = za0002 + } + case "fields": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + if z.Fields == nil { + z.Fields = make(map[string]interface{}, zb0003) + } else if len(z.Fields) > 0 { + for key := range z.Fields { + delete(z.Fields, key) + } + } + for zb0003 > 0 { + zb0003-- + var za0003 string + var za0004 interface{} + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + za0004, err = dc.ReadIntf() + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + z.Fields[za0003] = za0004 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Metric) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 + // write "name" + err = en.Append(0x84, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "time" + err = en.Append(0xa4, 0x74, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteExtension(&z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + // write "tags" + err = en.Append(0xa4, 0x74, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Tags))) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + for za0001, za0002 := range z.Tags { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + } + // write "fields" + err = en.Append(0xa6, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Fields))) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + for za0003, za0004 := range z.Fields { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + err = en.WriteIntf(za0004) + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Metric) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "name" + o = 
append(o, 0x84, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "time" + o = append(o, 0xa4, 0x74, 0x69, 0x6d, 0x65) + o, err = msgp.AppendExtension(o, &z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + // string "tags" + o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0001, za0002 := range z.Tags { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + // string "fields" + o = append(o, 0xa6, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Fields))) + for za0003, za0004 := range z.Fields { + o = msgp.AppendString(o, za0003) + o, err = msgp.AppendIntf(o, za0004) + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Metric) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "time": + bts, err = msgp.ReadExtensionBytes(bts, &z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + case "tags": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0002) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + z.Tags[za0001] = za0002 + } + case "fields": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + if z.Fields == nil { + z.Fields = make(map[string]interface{}, zb0003) + } else if len(z.Fields) > 0 { + for key := range z.Fields { + delete(z.Fields, key) + } + } + for zb0003 > 0 { + var za0003 string + var za0004 interface{} + zb0003-- + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + za0004, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + z.Fields[za0003] = za0004 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Metric) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 5 + msgp.ExtensionPrefixSize + z.Time.Len() + 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0001, za0002 := range z.Tags { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 7 + msgp.MapHeaderSize + if z.Fields != nil { + for za0003, za0004 := range z.Fields { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + 
msgp.GuessSize(za0004) + } + } + return +} diff --git a/plugins/serializers/msgpack/metric_gen_test.go b/plugins/serializers/msgpack/metric_gen_test.go new file mode 100644 index 0000000000000..e24d0a9b179c3 --- /dev/null +++ b/plugins/serializers/msgpack/metric_gen_test.go @@ -0,0 +1,236 @@ +package msgpack + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalMessagePackTime(t *testing.T) { + v := MessagePackTime{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMessagePackTime(b *testing.B) { + v := MessagePackTime{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMessagePackTime(b *testing.B) { + v := MessagePackTime{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMessagePackTime(b *testing.B) { + v := MessagePackTime{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMessagePackTime(t *testing.T) { + v := MessagePackTime{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeMessagePackTime Msgsize() is inaccurate") + } + + vn := MessagePackTime{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMessagePackTime(b *testing.B) { + v := MessagePackTime{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMessagePackTime(b *testing.B) { + v := MessagePackTime{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetric(t *testing.T) { + v := Metric{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetric(b *testing.B) { + v := Metric{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetric(b *testing.B) { + v := Metric{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = 
v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetric(b *testing.B) { + v := Metric{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetric(t *testing.T) { + v := Metric{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeMetric Msgsize() is inaccurate") + } + + vn := Metric{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetric(b *testing.B) { + v := Metric{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetric(b *testing.B) { + v := Metric{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/plugins/serializers/msgpack/metric_test.go b/plugins/serializers/msgpack/metric_test.go new file mode 100644 index 0000000000000..e0ea25ebc88a7 --- /dev/null +++ b/plugins/serializers/msgpack/metric_test.go @@ -0,0 +1,143 @@ +package msgpack + +import ( + "encoding/hex" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestMsgPackTime32(t *testing.T) { + // Maximum of 4 bytes encodable time + var sec int64 = 0xFFFFFFFF + var nsec int64 = 0 + t1 := MessagePackTime{time: time.Unix(sec, nsec)} + + assert.Equal(t, t1.Len(), 4) + + buf := make([]byte, t1.Len()) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 := new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.Equal(t, t1.time, t2.time) +} + +func TestMsgPackTime64(t *testing.T) { + // Maximum of 8 bytes encodable time + var sec int64 = 0x3FFFFFFFF + var nsec int64 = 999999999 + t1 := MessagePackTime{time: time.Unix(sec, nsec)} + + assert.Equal(t, t1.Len(), 8) + + buf := make([]byte, t1.Len()) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 := new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.Equal(t, t1.time, t2.time) +} + +func TestMsgPackTime96(t *testing.T) { + // Testing 12 bytes timestamp + var sec int64 = 0x400000001 + var nsec int64 = 111111111 + t1 := MessagePackTime{time: time.Unix(sec, nsec)} + + assert.Equal(t, t1.Len(), 12) + + buf := make([]byte, t1.Len()) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 := new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.True(t, t1.time.Equal(t2.time)) + + // Testing the default value: 0001-01-01T00:00:00Z + t1 = MessagePackTime{} + + assert.Equal(t, t1.Len(), 12) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 = new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.True(t, t1.time.Equal(t2.time)) +} + +func TestMsgPackTimeEdgeCases(t *testing.T) { + times := make([]time.Time, 0) + expected := make([][]byte, 0) + + // Unix epoch. 
Beginning of 4-byte dates + // Nanoseconds: 0x00000000, Seconds: 0x0000000000000000 + ts, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:00Z") + bs, _ := hex.DecodeString("d6ff00000000") + times = append(times, ts) + expected = append(expected, bs) + + // End of 4-byte dates + // Nanoseconds: 0x00000000, Seconds: 0x00000000ffffffff + ts, _ = time.Parse(time.RFC3339, "2106-02-07T06:28:15Z") + bs, _ = hex.DecodeString("d6ffffffffff") + times = append(times, ts) + expected = append(expected, bs) + + // Beginning of 8-byte dates + // Nanoseconds: 0x00000000, Seconds: 0x0000000100000000 + ts, _ = time.Parse(time.RFC3339, "2106-02-07T06:28:16Z") + bs, _ = hex.DecodeString("d7ff0000000100000000") + times = append(times, ts) + expected = append(expected, bs) + + // Just after the Unix epoch. Non-zero nanoseconds + // Nanoseconds: 0x00000001, Seconds: 0x0000000000000000 + ts, _ = time.Parse(time.RFC3339Nano, "1970-01-01T00:00:00.000000001Z") + bs, _ = hex.DecodeString("d7ff0000000400000000") + times = append(times, ts) + expected = append(expected, bs) + + // End of 8-byte dates + // Nanoseconds: 0x00000000, Seconds: 0x00000003ffffffff + ts, _ = time.Parse(time.RFC3339Nano, "2514-05-30T01:53:03.000000000Z") + bs, _ = hex.DecodeString("d7ff00000003ffffffff") + times = append(times, ts) + expected = append(expected, bs) + + // Beginning of 12-byte dates + // Nanoseconds: 0x00000000, Seconds: 0x0000000400000000 + ts, _ = time.Parse(time.RFC3339Nano, "2514-05-30T01:53:04.000000000Z") + bs, _ = hex.DecodeString("c70cff000000000000000400000000") + times = append(times, ts) + expected = append(expected, bs) + + // Zero value, 0001-01-01T00:00:00Z + // Nanoseconds: 0x00000000, Seconds: 0xfffffff1886e0900 + ts = time.Time{} + bs, _ = hex.DecodeString("c70cff00000000fffffff1886e0900") + times = append(times, ts) + expected = append(expected, bs) + + // Max value + // Nanoseconds: 0x3b9ac9ff, Seconds: 0x7fffffffffffffff + ts = time.Unix(math.MaxInt64, 999_999_999).UTC() + bs, _ = hex.DecodeString("c70cff3b9ac9ff7fffffffffffffff") + times = append(times, ts) + expected = append(expected, bs) + + buf := make([]byte, 0) + for i, ts := range times { + t1 := MessagePackTime{time: ts} + m := Metric{Time: t1} + + buf = buf[:0] + buf, _ = m.MarshalMsg(buf) + assert.Equal(t, expected[i], buf[12:len(buf)-14]) + } +} diff --git a/plugins/serializers/msgpack/msgpack.go b/plugins/serializers/msgpack/msgpack.go new file mode 100644 index 0000000000000..cd5f6ceb2dc87 --- /dev/null +++ b/plugins/serializers/msgpack/msgpack.go @@ -0,0 +1,43 @@ +package msgpack + +import ( + "github.com/influxdata/telegraf" +) + +// Serializer encodes metrics in MessagePack format +type Serializer struct{} + +// NewSerializer creates a msgpack.Serializer +func NewSerializer() *Serializer { + return &Serializer{} +} + +func marshalMetric(buf []byte, metric telegraf.Metric) ([]byte, error) { + return (&Metric{ + Name: metric.Name(), + Time: MessagePackTime{time: metric.Time()}, + Tags: metric.Tags(), + Fields: metric.Fields(), + }).MarshalMsg(buf) +} + +// Serialize implements serializers.Serializer.Serialize +// github.com/influxdata/telegraf/plugins/serializers/Serializer +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return marshalMetric(nil, metric) +} + +// SerializeBatch implements serializers.Serializer.SerializeBatch +// github.com/influxdata/telegraf/plugins/serializers/Serializer +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + buf := make([]byte, 0) + for _, m := range metrics { + var
err error + buf, err = marshalMetric(buf, m) + + if err != nil { + return nil, err + } + } + return buf, nil +} diff --git a/plugins/serializers/msgpack/msgpack_test.go b/plugins/serializers/msgpack/msgpack_test.go new file mode 100644 index 0000000000000..36cc66ea52c59 --- /dev/null +++ b/plugins/serializers/msgpack/msgpack_test.go @@ -0,0 +1,131 @@ +package msgpack + +import ( + "testing", + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func toTelegrafMetric(m Metric) telegraf.Metric { + tm := metric.New(m.Name, m.Tags, m.Fields, m.Time.time) + return tm +} + +func TestSerializeMetricInt(t *testing.T) { + m := testutil.TestMetric(int64(90)) + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMetricString(t *testing.T) { + m := testutil.TestMetric("foobar") + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMultiFields(t *testing.T) { + m := testutil.TestMetric(int(90)) + m.AddField("value2", 8559615) + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMetricWithEscapes(t *testing.T) { + m := testutil.TestMetric(int(90)) + m.AddField("U,age=Idle", int64(90)) + m.AddTag("cpu tag", "cpu0") + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMultipleMetric(t *testing.T) { + m := testutil.TestMetric(int(90)) + + s := Serializer{} + + encoded, err := s.Serialize(m) + assert.NoError(t, err) + + // Multiple metrics in a continuous byte stream + var buf []byte + buf = append(buf, encoded...) + buf = append(buf, encoded...) + buf = append(buf, encoded...) + buf = append(buf, encoded...)
+ + left := buf + for len(left) > 0 { + decodeM := &Metric{} + left, err = decodeM.UnmarshalMsg(left) + + assert.NoError(t, err) + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) + } +} + +func TestSerializeBatch(t *testing.T) { + m := testutil.TestMetric(int(90)) + + metrics := []telegraf.Metric{m, m, m, m} + + s := Serializer{} + + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + left := buf + for len(left) > 0 { + decodeM := &Metric{} + left, err = decodeM.UnmarshalMsg(left) + + assert.NoError(t, err) + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) + } +} diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go index c9d0b946370f8..b1960bb7a9f57 100644 --- a/plugins/serializers/nowmetric/nowmetric.go +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -123,9 +123,9 @@ func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) { allmetrics = append(allmetrics, oimetric) } - metricsJson, err := json.Marshal(allmetrics) + metricsJSON, err := json.Marshal(allmetrics) - return metricsJson, err + return metricsJSON, err } func verifyValue(v interface{}) bool { diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index e49b81c2d232c..b9e7914a6adbf 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -27,12 +27,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":91.5,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) assert.Equal(t, string(expS), string(buf)) @@ -67,15 +66,13 @@ func TestSerialize_TimestampUnits(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(1525478795, 123456789), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1525478795, 123456789), ) s, _ := NewSerializer() actual, err := s.Serialize(m) @@ -93,12 +90,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) @@ -113,12 +109,11 @@ func TestSerializeMetricString(t *testing.T) { fields := map[string]interface{}{ "usage_idle": "foobar", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) assert.Equal(t, "null", string(buf)) @@ -133,8 +128,7 @@ func TestSerializeMultiFields(t 
*testing.T) { "usage_idle": int64(90), "usage_total": 8559615, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) // Sort for predictable field order sort.Slice(m.FieldList(), func(i, j int) bool { @@ -143,7 +137,7 @@ func TestSerializeMultiFields(t *testing.T) { s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"usage_total","resource":"","node":"","value":8559615,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)), (now.UnixNano() / int64(time.Millisecond)))) @@ -158,8 +152,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { fields := map[string]interface{}{ "U,age=Idle": int64(90), } - m, err := metric.New("My CPU", tags, fields, now) - assert.NoError(t, err) + m := metric.New("My CPU", tags, fields, now) s, _ := NewSerializer() buf, err := s.Serialize(m) @@ -170,17 +163,14 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) - metrics := []telegraf.Metric{m, m} s, _ := NewSerializer() buf, err := s.SerializeBatch(metrics) diff --git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md index 19c869ffbccb3..446def0b46d77 100644 --- a/plugins/serializers/prometheus/README.md +++ b/plugins/serializers/prometheus/README.md @@ -8,7 +8,11 @@ use the `metric_version = 2` option in order to properly round trip metrics. not be correct if the metric spans multiple batches. This issue can be somewhat, but not fully, mitigated by using outputs that support writing in "batch format". When using histogram and summary types, it is recommended to -use only the `prometheus_client` output. +use only the `prometheus_client` output. Histogram and Summary types +also update their expiration time based on the most recently received data. +If incoming metrics stop updating specific buckets or quantiles but continue +reporting others, every bucket/quantile will continue to exist.
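+
+A minimal sketch of this expiration behavior, using the internal collection
+API exercised in `collection_test.go` (the helper values here are
+illustrative, not part of this serializer's configuration):
+
+```go
+c := NewCollection(FormatConfig{})
+
+// Sum and count are only seen at t=0.
+c.Add(testutil.MustMetric(
+    "prometheus",
+    map[string]string{},
+    map[string]interface{}{
+        "http_request_duration_seconds_sum":   10.0,
+        "http_request_duration_seconds_count": 2,
+    },
+    time.Unix(0, 0),
+    telegraf.Histogram,
+), time.Unix(0, 0))
+
+// One bucket is refreshed at t=15, which also refreshes the family's add time.
+c.Add(testutil.MustMetric(
+    "prometheus",
+    map[string]string{"le": "+Inf"},
+    map[string]interface{}{"http_request_duration_seconds_bucket": 1.0},
+    time.Unix(0, 0),
+    telegraf.Histogram,
+), time.Unix(15, 0))
+
+// At t=20 with age=10s the whole histogram survives thanks to the t=15 add.
+c.Expire(time.Unix(20, 0), 10*time.Second)
+families := c.GetProto() // still contains http_request_duration_seconds
+_ = families
+```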
+ ### Configuration diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index d37ecaaaf2f1d..e160107101ab7 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -7,9 +7,9 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const helpString = "Telegraf collected metric" @@ -168,7 +168,6 @@ func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair { labels = append(labels, LabelPair{Name: name, Value: value}) addedFieldLabel = true - } if addedFieldLabel { @@ -201,7 +200,6 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { Metrics: make(map[MetricKey]*Metric), } c.Entries[family] = entry - } metricKey := MakeMetricKey(labels) @@ -243,6 +241,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Histogram: &Histogram{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_bucket"): @@ -291,6 +292,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Summary: &Summary{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_sum"): diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index d2c5f5d098162..67447e66417ae 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) type Input struct { @@ -302,6 +302,117 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "entire histogram expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "histogram does not expire because of addtime from bucket", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: 
time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(15, 0), // More recent addtime causes entire metric to stay valid + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(10.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(1), + }, + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "summary quantile updates", now: time.Unix(0, 0), @@ -379,6 +490,106 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "Entire summary expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "summary does not expire because of quantile addtime", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 10.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(15, 0), // Recent addtime keeps entire metric around + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Summary: &dto.Summary{ + SampleSum: proto.Float64(1), + SampleCount: proto.Uint64(1), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.5), + Value: proto.Float64(10), + }, + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "expire based on add time", now: time.Unix(20, 0), @@ -425,3 +636,209 @@ func TestCollectionExpire(t *testing.T) { }) } } + +func TestExportTimestamps(t *testing.T) { + tests := []struct { + name string + now time.Time + age time.Duration + input []Input + expected []*dto.MetricFamily + }{ + { + name: "histogram bucket updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + 
"http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + // Next interval + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + "http_request_duration_seconds_count": 4, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(4), + SampleSum: proto.Float64(20.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(2), + }, + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "summary quantile updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + // Updated Summary + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / 
int64(time.Millisecond)), + Summary: &dto.Summary{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(2.0), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(2), + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewCollection(FormatConfig{TimestampExport: ExportTimestamp}) + for _, item := range tt.input { + c.Add(item.metric, item.addtime) + } + c.Expire(tt.now, tt.age) + + actual := c.GetProto() + + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/serializers/prometheusremotewrite/README.md b/plugins/serializers/prometheusremotewrite/README.md new file mode 100644 index 0000000000000..a0dc4a8deb03b --- /dev/null +++ b/plugins/serializers/prometheusremotewrite/README.md @@ -0,0 +1,44 @@ +# Prometheus remote write + +The `prometheusremotewrite` data format converts metrics into the Prometheus protobuf +exposition format. + +**Warning**: When generating histogram and summary types, output may +not be correct if the metric spans multiple batches. This issue can be +somewhat, but not fully, mitigated by using outputs that support writing in +"batch format". When using histogram and summary types, it is recommended to +use only the `prometheus_client` output. + +### Configuration + +```toml +[[outputs.http]] + ## URL is the address to send metrics to + url = "https://cortex/api/prom/push" + + ## Optional TLS Config + tls_ca = "/etc/telegraf/ca.pem" + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Data format to output. + data_format = "prometheusremotewrite" + + [outputs.http.headers] + Content-Type = "application/x-protobuf" + Content-Encoding = "snappy" + X-Prometheus-Remote-Write-Version = "0.1.0" +``` + +### Metrics + +A Prometheus metric is created for each integer, float, boolean or unsigned +field. Boolean values are converted to *1.0* for true and *0.0* for false. + +The Prometheus metric names are produced by joining the measurement name with +the field key. In the special case where the measurement name is `prometheus` +it is not included in the final metric name. + +Prometheus labels are produced for each tag. + +**Note:** String fields are ignored and do not produce Prometheus metrics. diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go new file mode 100644 index 0000000000000..b6dd180dba30b --- /dev/null +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -0,0 +1,344 @@ +package prometheusremotewrite + +import ( + "bytes" + "fmt" + "hash/fnv" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/snappy" + "github.com/influxdata/telegraf/plugins/serializers/prometheus" + + "github.com/influxdata/telegraf" + "github.com/prometheus/prometheus/prompb" +) + +type MetricKey uint64 + +// MetricSortOrder controls if the output is sorted. +type MetricSortOrder int + +const ( + NoSortMetrics MetricSortOrder = iota + SortMetrics +) + +// StringHandling defines how to process string fields. 
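+// DiscardStrings drops string fields entirely, while StringAsLabel turns each
+// string field into a label on the emitted series (an existing tag with the
+// same sanitized name takes precedence; see createLabels below).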
+type StringHandling int + +const ( + DiscardStrings StringHandling = iota + StringAsLabel +) + +type FormatConfig struct { + MetricSortOrder MetricSortOrder + StringHandling StringHandling +} + +type Serializer struct { + config FormatConfig +} + +func NewSerializer(config FormatConfig) (*Serializer, error) { + s := &Serializer{config: config} + return s, nil +} + +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return s.SerializeBatch([]telegraf.Metric{metric}) +} + +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + var buf bytes.Buffer + var entries = make(map[MetricKey]prompb.TimeSeries) + for _, metric := range metrics { + commonLabels := s.createLabels(metric) + var metrickey MetricKey + var promts prompb.TimeSeries + for _, field := range metric.FieldList() { + metricName := prometheus.MetricName(metric.Name(), field.Key, metric.Type()) + metricName, ok := prometheus.SanitizeMetricName(metricName) + if !ok { + continue + } + switch metric.Type() { + case telegraf.Counter: + fallthrough + case telegraf.Gauge: + fallthrough + case telegraf.Untyped: + value, ok := prometheus.SampleValue(field.Value) + if !ok { + continue + } + metrickey, promts = getPromTS(metricName, commonLabels, value, metric.Time()) + case telegraf.Histogram: + switch { + case strings.HasSuffix(field.Key, "_bucket"): + // if bucket only, init sum, count, inf + metrickeysum, promtssum := getPromTS(fmt.Sprintf("%s_sum", metricName), commonLabels, float64(0), metric.Time()) + if _, ok = entries[metrickeysum]; !ok { + entries[metrickeysum] = promtssum + } + metrickeycount, promtscount := getPromTS(fmt.Sprintf("%s_count", metricName), commonLabels, float64(0), metric.Time()) + if _, ok = entries[metrickeycount]; !ok { + entries[metrickeycount] = promtscount + } + labels := make([]prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, prompb.Label{ + Name: "le", + Value: "+Inf", + }) + metrickeyinf, promtsinf := getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(0), metric.Time()) + if _, ok = entries[metrickeyinf]; !ok { + entries[metrickeyinf] = promtsinf + } + + le, ok := metric.GetTag("le") + if !ok { + continue + } + bound, err := strconv.ParseFloat(le, 64) + if err != nil { + continue + } + count, ok := prometheus.SampleCount(field.Value) + if !ok { + continue + } + + labels = make([]prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, prompb.Label{ + Name: "le", + Value: fmt.Sprint(bound), + }) + metrickey, promts = getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(count), metric.Time()) + case strings.HasSuffix(field.Key, "_sum"): + sum, ok := prometheus.SampleSum(field.Value) + if !ok { + continue + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_sum", metricName), commonLabels, sum, metric.Time()) + case strings.HasSuffix(field.Key, "_count"): + count, ok := prometheus.SampleCount(field.Value) + if !ok { + continue + } + + // if no bucket generate +Inf entry + labels := make([]prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, prompb.Label{ + Name: "le", + Value: "+Inf", + }) + metrickeyinf, promtsinf := getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(count), metric.Time()) + if minf, ok := entries[metrickeyinf]; !ok || minf.Samples[0].Value == 0 { + entries[metrickeyinf] = promtsinf + } + + metrickey, promts = 
getPromTS(fmt.Sprintf("%s_count", metricName), commonLabels, float64(count), metric.Time()) + default: + continue + } + case telegraf.Summary: + switch { + case strings.HasSuffix(field.Key, "_sum"): + sum, ok := prometheus.SampleSum(field.Value) + if !ok { + continue + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_sum", metricName), commonLabels, sum, metric.Time()) + case strings.HasSuffix(field.Key, "_count"): + count, ok := prometheus.SampleCount(field.Value) + if !ok { + continue + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_count", metricName), commonLabels, float64(count), metric.Time()) + default: + quantileTag, ok := metric.GetTag("quantile") + if !ok { + continue + } + quantile, err := strconv.ParseFloat(quantileTag, 64) + if err != nil { + continue + } + value, ok := prometheus.SampleValue(field.Value) + if !ok { + continue + } + + labels := make([]prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, prompb.Label{ + Name: "quantile", + Value: fmt.Sprint(quantile), + }) + metrickey, promts = getPromTS(metricName, labels, value, metric.Time()) + } + default: + return nil, fmt.Errorf("unknown type %v", metric.Type()) + } + + // A batch of metrics can contain multiple values for a single + // Prometheus sample. If this metric is older than the existing + // sample then we can skip over it. + m, ok := entries[metrickey] + if ok { + if metric.Time().Before(time.Unix(m.Samples[0].Timestamp, 0)) { + continue + } + } + entries[metrickey] = promts + } + } + + var promTS = make([]prompb.TimeSeries, len(entries)) + var i int + for _, promts := range entries { + promTS[i] = promts + i++ + } + + switch s.config.MetricSortOrder { + case SortMetrics: + sort.Slice(promTS, func(i, j int) bool { + lhs := promTS[i].Labels + rhs := promTS[j].Labels + if len(lhs) != len(rhs) { + return len(lhs) < len(rhs) + } + + for index := range lhs { + l := lhs[index] + r := rhs[index] + + if l.Name != r.Name { + return l.Name < r.Name + } + + if l.Value != r.Value { + return l.Value < r.Value + } + } + + return false + }) + } + pb := &prompb.WriteRequest{Timeseries: promTS} + data, err := pb.Marshal() + if err != nil { + return nil, fmt.Errorf("unable to marshal protobuf: %v", err) + } + encoded := snappy.Encode(nil, data) + buf.Write(encoded) + return buf.Bytes(), nil +} + +func hasLabel(name string, labels []prompb.Label) bool { + for _, label := range labels { + if name == label.Name { + return true + } + } + return false +} + +func (s *Serializer) createLabels(metric telegraf.Metric) []prompb.Label { + labels := make([]prompb.Label, 0, len(metric.TagList())) + for _, tag := range metric.TagList() { + // Ignore special tags for histogram and summary types. + switch metric.Type() { + case telegraf.Histogram: + if tag.Key == "le" { + continue + } + case telegraf.Summary: + if tag.Key == "quantile" { + continue + } + } + + name, ok := prometheus.SanitizeLabelName(tag.Key) + if !ok { + continue + } + + // remove tags with empty values + if tag.Value == "" { + continue + } + + labels = append(labels, prompb.Label{Name: name, Value: tag.Value}) + } + + if s.config.StringHandling != StringAsLabel { + return labels + } + + addedFieldLabel := false + for _, field := range metric.FieldList() { + value, ok := field.Value.(string) + if !ok { + continue + } + + name, ok := prometheus.SanitizeLabelName(field.Key) + if !ok { + continue + } + + // If there is a tag with the same name as the string field, discard + // the field and use the tag instead. 
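+		// Tags were collected into labels first, so this check naturally
+		// prefers a tag over a string field of the same sanitized name.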
+ if hasLabel(name, labels) { + continue + } + + labels = append(labels, prompb.Label{Name: name, Value: value}) + addedFieldLabel = true + } + + if addedFieldLabel { + sort.Slice(labels, func(i, j int) bool { + return labels[i].Name < labels[j].Name + }) + } + + return labels +} + +func MakeMetricKey(labels []prompb.Label) MetricKey { + h := fnv.New64a() + for _, label := range labels { + h.Write([]byte(label.Name)) + h.Write([]byte("\x00")) + h.Write([]byte(label.Value)) + h.Write([]byte("\x00")) + } + return MetricKey(h.Sum64()) +} + +func getPromTS(name string, labels []prompb.Label, value float64, ts time.Time) (MetricKey, prompb.TimeSeries) { + sample := []prompb.Sample{{ + // Timestamp is int milliseconds for remote write. + Timestamp: ts.UnixNano() / int64(time.Millisecond), + Value: value, + }} + labelscopy := make([]prompb.Label, len(labels), len(labels)+1) + copy(labelscopy, labels) + labels = append(labelscopy, prompb.Label{ + Name: "__name__", + Value: name, + }) + return MakeMetricKey(labels), prompb.TimeSeries{Labels: labels, Samples: sample} +} diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go new file mode 100644 index 0000000000000..f07c2c3fecfc6 --- /dev/null +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -0,0 +1,700 @@ +package prometheusremotewrite + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/golang/snappy" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestRemoteWriteSerialize(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metric telegraf.Metric + expected []byte + }{ + { + name: "simple", + metric: testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + expected: []byte(` +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus input untyped", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Untyped, + ), + expected: []byte(` +http_requests_total{code="400", method="post"} 3 +`), + }, + { + name: "prometheus input counter", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + expected: []byte(` +http_requests_total{code="400", method="post"} 3 +`), + }, + { + name: "prometheus input gauge", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + expected: []byte(` +http_requests_total{code="400", method="post"} 3 +`), + }, + { + name: "prometheus input histogram no buckets", + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +http_request_duration_seconds_count 144320 +http_request_duration_seconds_sum 53423 
+http_request_duration_seconds_bucket{le="+Inf"} 144320 +`), + }, + { + name: "prometheus input histogram only bucket", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "le": "0.5", + }, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +http_request_duration_seconds_count 0 +http_request_duration_seconds_sum 0 +http_request_duration_seconds_bucket{le="+Inf"} 0 +http_request_duration_seconds_bucket{le="0.5"} 129389 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + data, err := s.Serialize(tt.metric) + require.NoError(t, err) + actual, err := prompbToText(data) + require.NoError(t, err) + + require.Equal(t, strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} + +func TestRemoteWriteSerializeBatch(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metrics []telegraf.Metric + expected []byte + }{ + { + name: "simple", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "two.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "multiple metric families", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + "time_guest": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_guest{host="one.example.org"} 42 +cpu_time_idle{host="one.example.org"} 42 +`), + }, + { + name: "histogram", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 24054.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.1"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 33444.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.2"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 100392.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.5"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "1.0"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 133988.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 144320.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + 
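+			// With SortMetrics, series with fewer labels sort first and the
+			// rest sort by label value as strings, so the "+Inf" bucket
+			// ("+" sorts before "0") precedes the numeric bounds below.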
expected: []byte(` +http_request_duration_seconds_count 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +`), + }, + { + name: "summary with quantile", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.7560473e+07, + "rpc_duration_seconds_count": 2693, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 3102.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.05"}, + map[string]interface{}{ + "rpc_duration_seconds": 3272.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 4773.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.9"}, + map[string]interface{}{ + "rpc_duration_seconds": 9001.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.99"}, + map[string]interface{}{ + "rpc_duration_seconds": 76656.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +rpc_duration_seconds_count 2693 +rpc_duration_seconds_sum 17560473 +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +`), + }, + { + name: "newer sample", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(1, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle 43 +`), + }, + { + name: "colons are not replaced in metric name from measurement", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu::xyzzy", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu::xyzzy_time_idle 42 +`), + }, + { + name: "colons are not replaced in metric name from field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time:idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time:idle 42 +`), + }, + { + name: "invalid label", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host-name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "colons are replaced in label name", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host:name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` 
+cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "discard strings", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle 42 +`), + }, + { + name: "string as label", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "string as label duplicate tag", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu1", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "replace characters when using string as label", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "host:name": "example.org", + "time_idle": 42.0, + }, + time.Unix(1574279268, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "multiple fields grouping", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 8106.04, + "time_system": 26271.4, + "time_user": 92904.33, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{ + "time_guest": 8181.63, + "time_system": 25351.49, + "time_user": 96912.57, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_guest": 7470.04, + "time_system": 24998.43, + "time_user": 96034.08, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu3", + }, + map[string]interface{}{ + "time_guest": 7517.95, + "time_system": 24970.82, + "time_user": 94148, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_guest{cpu="cpu0"} 8106.04 +cpu_time_system{cpu="cpu0"} 26271.4 +cpu_time_user{cpu="cpu0"} 92904.33 +cpu_time_guest{cpu="cpu1"} 8181.63 +cpu_time_system{cpu="cpu1"} 25351.49 +cpu_time_user{cpu="cpu1"} 96912.57 +cpu_time_guest{cpu="cpu2"} 7470.04 +cpu_time_system{cpu="cpu2"} 24998.43 +cpu_time_user{cpu="cpu2"} 96034.08 +cpu_time_guest{cpu="cpu3"} 7517.95 +cpu_time_system{cpu="cpu3"} 24970.82 +cpu_time_user{cpu="cpu3"} 94148 +`), + }, + { + name: "summary with no quantile", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.7560473e+07, + "rpc_duration_seconds_count": 2693, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +rpc_duration_seconds_count 2693 +rpc_duration_seconds_sum 17560473 +`), + }, + { + name: "empty label string value", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "cpu": "", + }, + map[string]interface{}{ + 
"time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` + time_idle 42 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + data, err := s.SerializeBatch(tt.metrics) + require.NoError(t, err) + actual, err := prompbToText(data) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} + +func prompbToText(data []byte) ([]byte, error) { + var buf = bytes.Buffer{} + protobuff, err := snappy.Decode(nil, data) + if err != nil { + return nil, err + } + var req prompb.WriteRequest + err = req.Unmarshal(protobuff) + if err != nil { + return nil, err + } + samples := protoToSamples(&req) + for _, sample := range samples { + _, err = buf.Write([]byte(fmt.Sprintf("%s %s\n", sample.Metric.String(), sample.Value.String()))) + if err != nil { + return nil, err + } + } + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func protoToSamples(req *prompb.WriteRequest) model.Samples { + var samples model.Samples + for _, ts := range req.Timeseries { + metric := make(model.Metric, len(ts.Labels)) + for _, l := range ts.Labels { + metric[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + + for _, s := range ts.Samples { + samples = append(samples, &model.Sample{ + Metric: metric, + Value: model.SampleValue(s.Value), + Timestamp: model.Time(s.Timestamp), + }) + } + } + return samples +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index b12ef7660b981..e67a9594dda73 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -9,8 +9,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/plugins/serializers/msgpack" "github.com/influxdata/telegraf/plugins/serializers/nowmetric" "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/influxdata/telegraf/plugins/serializers/prometheusremotewrite" "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" "github.com/influxdata/telegraf/plugins/serializers/wavefront" ) @@ -51,9 +53,15 @@ type Config struct { // Carbon2 metric format. Carbon2Format string `toml:"carbon2_format"` + // Character used for metric name sanitization in Carbon2. 
+ Carbon2SanitizeReplaceChar string `toml:"carbon2_sanitize_replace_char"` + // Support tags in graphite protocol GraphiteTagSupport bool `toml:"graphite_tag_support"` + // Support tags which follow the spec + GraphiteTagSanitizeMode string `toml:"graphite_tag_sanitize_mode"` + // Character for separating metric name and field for Graphite tags GraphiteSeparator string `toml:"graphite_separator"` @@ -113,25 +121,46 @@ func NewSerializer(config *Config) (Serializer, error) { case "influx": serializer, err = NewInfluxSerializerConfig(config) case "graphite": - serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteSeparator, config.Templates) + serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteTagSanitizeMode, config.GraphiteSeparator, config.Templates) case "json": - serializer, err = NewJsonSerializer(config.TimestampUnits) + serializer, err = NewJSONSerializer(config.TimestampUnits) case "splunkmetric": serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) case "nowmetric": serializer, err = NewNowSerializer() case "carbon2": - serializer, err = NewCarbon2Serializer(config.Carbon2Format) + serializer, err = NewCarbon2Serializer(config.Carbon2Format, config.Carbon2SanitizeReplaceChar) case "wavefront": serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride) case "prometheus": serializer, err = NewPrometheusSerializer(config) + case "prometheusremotewrite": + serializer, err = NewPrometheusRemoteWriteSerializer(config) + case "msgpack": + serializer, err = NewMsgpackSerializer() default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return serializer, err } +func NewPrometheusRemoteWriteSerializer(config *Config) (Serializer, error) { + sortMetrics := prometheusremotewrite.NoSortMetrics + if config.PrometheusExportTimestamp { + sortMetrics = prometheusremotewrite.SortMetrics + } + + stringAsLabels := prometheusremotewrite.DiscardStrings + if config.PrometheusStringAsLabel { + stringAsLabels = prometheusremotewrite.StringAsLabel + } + + return prometheusremotewrite.NewSerializer(prometheusremotewrite.FormatConfig{ + MetricSortOrder: sortMetrics, + StringHandling: stringAsLabels, + }) +} + func NewPrometheusSerializer(config *Config) (Serializer, error) { exportTimestamp := prometheus.NoExportTimestamp if config.PrometheusExportTimestamp { @@ -159,16 +188,16 @@ func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []stri return wavefront.NewSerializer(prefix, useStrict, sourceOverride) } -func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) { +func NewJSONSerializer(timestampUnits time.Duration) (Serializer, error) { return json.NewSerializer(timestampUnits) } -func NewCarbon2Serializer(carbon2format string) (Serializer, error) { - return carbon2.NewSerializer(carbon2format) +func NewCarbon2Serializer(carbon2format string, carbon2SanitizeReplaceChar string) (Serializer, error) { + return carbon2.NewSerializer(carbon2format, carbon2SanitizeReplaceChar) } -func NewSplunkmetricSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (Serializer, error) { - return splunkmetric.NewSerializer(splunkmetric_hec_routing, splunkmetric_multimetric) +func NewSplunkmetricSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (Serializer, error) { + return 
splunkmetric.NewSerializer(splunkmetricHecRouting, splunkmetricMultimetric) } func NewNowSerializer() (Serializer, error) { @@ -197,7 +226,7 @@ func NewInfluxSerializer() (Serializer, error) { return influx.NewSerializer(), nil } -func NewGraphiteSerializer(prefix, template string, tag_support bool, separator string, templates []string) (Serializer, error) { +func NewGraphiteSerializer(prefix, template string, tagSupport bool, tagSanitizeMode string, separator string, templates []string) (Serializer, error) { graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates) if err != nil { @@ -208,15 +237,24 @@ func NewGraphiteSerializer(prefix, template string, tag_support bool, separator template = defaultTemplate } + if tagSanitizeMode == "" { + tagSanitizeMode = "strict" + } + if separator == "" { separator = "." } return &graphite.GraphiteSerializer{ - Prefix: prefix, - Template: template, - TagSupport: tag_support, - Separator: separator, - Templates: graphiteTemplates, + Prefix: prefix, + Template: template, + TagSupport: tagSupport, + TagSanitizeMode: tagSanitizeMode, + Separator: separator, + Templates: graphiteTemplates, }, nil } + +func NewMsgpackSerializer() (Serializer, error) { + return msgpack.NewSerializer(), nil +} diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index b96db5cf81155..fc9ffe61ecfe4 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -2,7 +2,6 @@ package splunkmetric import ( "encoding/json" - "fmt" "log" "github.com/influxdata/telegraf" @@ -30,34 +29,25 @@ type HECTimeSeries struct { } // NewSerializer Setup our new serializer -func NewSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (*serializer, error) { +func NewSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (*serializer, error) { /* Define output params */ s := &serializer{ - HecRouting: splunkmetric_hec_routing, - SplunkmetricMultiMetric: splunkmetric_multimetric, + HecRouting: splunkmetricHecRouting, + SplunkmetricMultiMetric: splunkmetricMultimetric, } return s, nil } func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { - - m, err := s.createObject(metric) - if err != nil { - return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) - } - - return m, nil + return s.createObject(metric), nil } func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - var serialized []byte for _, metric := range metrics { - m, err := s.createObject(metric) - if err != nil { - return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) - } else if m != nil { + m := s.createObject(metric) + if m != nil { serialized = append(serialized, m...) } } @@ -121,7 +111,6 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie var metricJSON []byte for _, field := range metric.FieldList() { - value, valid := verifyValue(field.Value) if !valid { @@ -160,8 +149,7 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie return metricGroup, nil } -func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) { - +func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte) { /* Splunk supports one metric json object, and does _not_ support an array of JSON objects. 
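	** Because of that, SerializeBatch simply concatenates one JSON object per
	** metric (the tests expect output of the form {...}{...}) rather than
	** emitting a JSON array.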
** Splunk has the following required names for the metric store: ** metric_name: The name of the metric @@ -198,7 +186,7 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e } // Return the metric group regardless of if it's multimetric or single metric. - return metricGroup, nil + return metricGroup } func verifyValue(v interface{}) (value interface{}, valid bool) { diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index c00bcc7798aac..c088d99f7f1a4 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -25,15 +25,14 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricFloatHec(t *testing.T) { @@ -45,15 +44,14 @@ func TestSerializeMetricFloatHec(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"time":1529875740.819,"fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricInt(t *testing.T) { @@ -64,16 +62,15 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricIntHec(t *testing.T) { @@ -84,16 +81,15 @@ func TestSerializeMetricIntHec(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricBool(t *testing.T) { @@ -104,16 +100,15 @@ func TestSerializeMetricBool(t *testing.T) { fields := map[string]interface{}{ "oomkiller": bool(true), } - m, err := metric.New("docker", tags, fields, now) - assert.NoError(t, err) + m := metric.New("docker", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := 
`{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricBoolHec(t *testing.T) { @@ -124,16 +119,15 @@ func TestSerializeMetricBoolHec(t *testing.T) { fields := map[string]interface{}{ "oomkiller": bool(false), } - m, err := metric.New("docker", tags, fields, now) - assert.NoError(t, err) + m := metric.New("docker", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricString(t *testing.T) { @@ -145,39 +139,35 @@ func TestSerializeMetricString(t *testing.T) { "processorType": "ARMv7 Processor rev 4 (v7l)", "usage_idle": int64(5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) assert.NoError(t, err) } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) - n := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 92.0, - }, - time.Unix(0, 0), - ), + + n := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, n} @@ -186,20 +176,18 @@ func TestSerializeBatch(t *testing.T) { assert.NoError(t, err) expS := `{"_value":42,"metric_name":"cpu.value","time":0}{"_value":92,"metric_name":"cpu.value","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMulti(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "user": 42.0, - "system": 8.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "user": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m} @@ -208,51 +196,44 @@ func TestSerializeMulti(t *testing.T) { assert.NoError(t, err) expS := `{"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeBatchHec(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) - n := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 92.0, - }, - time.Unix(0, 0), - ), + n := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), ) - metrics := []telegraf.Metric{m, n} s, _ := NewSerializer(true, false) buf, err := 
s.SerializeBatch(metrics) assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":42,"metric_name":"cpu.value"}}{"time":0,"fields":{"_value":92,"metric_name":"cpu.value"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMultiHec(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "usage": 42.0, - "system": 8.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "usage": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m} @@ -261,5 +242,5 @@ func TestSerializeMultiHec(t *testing.T) { assert.NoError(t, err) expS := `{"time":0,"fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index 67fa1ae3a6834..0abcf799d2a0f 100755 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -7,7 +7,7 @@ import ( "sync" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/wavefront" + "github.com/influxdata/telegraf/plugins/outputs/wavefront" // TODO: this dependency is going the wrong way: Move MetricPoint into the serializer. ) // WavefrontSerializer : WavefrontSerializer struct @@ -49,7 +49,7 @@ func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*Wav return s, nil } -func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) { +func (s *WavefrontSerializer) serialize(m telegraf.Metric) { const metricSeparator = "." for fieldName, value := range m.Fields() { @@ -90,7 +90,7 @@ func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) { func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { s.mu.Lock() s.scratch.Reset() - s.serialize(&s.scratch, m) + s.serialize(m) out := s.scratch.Copy() s.mu.Unlock() return out, nil @@ -100,7 +100,7 @@ func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, s.mu.Lock() s.scratch.Reset() for _, m := range metrics { - s.serialize(&s.scratch, m) + s.serialize(m) } out := s.scratch.Copy() s.mu.Unlock() diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go index 548326e703e6c..ee653c62b4072 100755 --- a/plugins/serializers/wavefront/wavefront_test.go +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -178,13 +178,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.500000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -199,13 +197,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(91), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 
91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -220,13 +216,11 @@ func TestSerializeMetricBoolTrue(t *testing.T) { fields := map[string]interface{}{ "usage_idle": true, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 1.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -241,13 +235,11 @@ func TestSerializeMetricBoolFalse(t *testing.T) { fields := map[string]interface{}{ "usage_idle": false, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 0.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -262,13 +254,11 @@ func TestSerializeMetricFieldValue(t *testing.T) { fields := map[string]interface{}{ "value": int64(91), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -283,13 +273,11 @@ func TestSerializeMetricPrefix(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(91), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{Prefix: "telegraf."} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -306,10 +294,7 @@ func benchmarkMetrics(b *testing.B) [4]telegraf.Metric { fields := map[string]interface{}{ "usage_idle": v, } - m, err := metric.New("cpu", tags, fields, now) - if err != nil { - b.Fatal(err) - } + m := metric.New("cpu", tags, fields, now) return m } return [4]telegraf.Metric{ diff --git a/scripts/alpine.docker b/scripts/alpine.docker deleted file mode 100644 index 4c83e322d277e..0000000000000 --- a/scripts/alpine.docker +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.15.2 as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . 
/go/src/github.com/influxdata/telegraf -RUN CGO_ENABLED=0 make go-install - -FROM alpine:3.12 -RUN echo 'hosts: files dns' >> /etc/nsswitch.conf -RUN apk add --no-cache iputils ca-certificates net-snmp-tools procps lm_sensors && \ - update-ca-certificates -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] diff --git a/scripts/buster.docker b/scripts/buster.docker deleted file mode 100644 index 3919d8ca5fd20..0000000000000 --- a/scripts/buster.docker +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang:1.15.2-buster as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . /go/src/github.com/influxdata/telegraf -RUN make go-install - -FROM buildpack-deps:buster-curl -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh index b76d47d579004..c52c553f5d1ba 100755 --- a/scripts/check-deps.sh +++ b/scripts/check-deps.sh @@ -51,6 +51,12 @@ for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do echo "${dep}" >> "${tmpdir}/HEAD" done +# If there are two versions of a library that have the same base (like +# github.com/foo/bar github.com/foo/bar/v3) there will be a duplicate +# in the list. Remove duplicates again. +mv "${tmpdir}/HEAD" "${tmpdir}/HEAD-dup" +uniq "${tmpdir}/HEAD-dup" > "${tmpdir}/HEAD" + grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" diff -U0 "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" "${tmpdir}/HEAD" || { diff --git a/scripts/check-dynamic-glibc-versions.sh b/scripts/check-dynamic-glibc-versions.sh new file mode 100755 index 0000000000000..b00e3bf9b7d08 --- /dev/null +++ b/scripts/check-dynamic-glibc-versions.sh @@ -0,0 +1,82 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +usage () { + echo "Check that no dynamic symbols provided by glibc are newer than a given version" + echo "Usage:" + echo " $0 program version" + echo "where program is the elf binary to check and version is a dotted version string like 2.3.4" + exit 1 +} + +#validate input and display help +[[ $# = 2 ]] || usage +prog=$1 +max=$2 + +#make sure dependencies are installed +have_deps=true +for i in objdump grep sort uniq sed; do + if ! command -v "$i" > /dev/null; then + echo "$i not in path" + have_deps=false + fi +done +if [[ $have_deps = false ]]; then + exit 1 +fi + +#compare dotted versions +#see https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash +vercomp () { + if [[ $1 == $2 ]] + then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +if ! 
objdump -p "$prog" | grep -q NEEDED; then + echo "$prog doesn't have dynamic library dependencies" + exit 0 +fi + +objdump -T "$prog" | # get the dynamic symbol table + sed -n "s/.* GLIBC_\([0-9.]\+\).*/\1/p" | # find the entries for glibc and grab the version + sort | uniq | # remove duplicates + while read v; do + set +e + vercomp "$v" "$max" # fail if any version is newer than our max + comp=$? + set -e + if [[ $comp -eq 1 ]]; then + echo "$v is newer than $max" + exit 1 + fi + done + +exit 0 diff --git a/scripts/check-file-changes.sh b/scripts/check-file-changes.sh new file mode 100755 index 0000000000000..3ee954a35040f --- /dev/null +++ b/scripts/check-file-changes.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# CIRCLE-CI SCRIPT: This file is used exclusively for CI +# To prevent the tests/builds to run for only a doc change, this script checks what files have changed in a pull request. + +exit 0 + +BRANCH="$(git rev-parse --abbrev-ref HEAD)" +echo $BRANCH +if [[ ${CIRCLE_PULL_REQUEST##*/} != "" ]]; then # Only skip if their is an associated pull request with this job + # Ask git for all the differences between this branch and master + # Then use grep to look for changes in the .circleci/ directory, anything named *.go or *.mod or *.sum or *.sh or Makefile + # If no match is found, then circleci step halt will stop the CI job but mark it successful + git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)$|^(.*\.(go|mod|sum|sh))$|^Makefile$" || circleci step halt; +fi diff --git a/scripts/ci-1.14.docker b/scripts/ci-1.14.docker deleted file mode 100644 index af3559460b3bd..0000000000000 --- a/scripts/ci-1.14.docker +++ /dev/null @@ -1,23 +0,0 @@ -FROM golang:1.14.9 - -RUN chmod -R 755 "$GOPATH" - -RUN DEBIAN_FRONTEND=noninteractive \ - apt update && apt install -y --no-install-recommends \ - autoconf \ - git \ - libtool \ - locales \ - make \ - awscli \ - rpm \ - ruby \ - ruby-dev \ - zip && \ - rm -rf /var/lib/apt/lists/* - -RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime -RUN locale-gen C.UTF-8 || true -ENV LANG=C.UTF-8 - -RUN gem install fpm diff --git a/scripts/ci-1.15.docker b/scripts/ci-1.17.docker similarity index 95% rename from scripts/ci-1.15.docker rename to scripts/ci-1.17.docker index 65230db5f6f3b..6b220c0898e94 100644 --- a/scripts/ci-1.15.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.2 +FROM golang:1.17.3 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/docker-entrypoint.sh b/scripts/docker-entrypoint.sh deleted file mode 100755 index 6e7580b21a92f..0000000000000 --- a/scripts/docker-entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e - -if [ "${1:0:1}" = '-' ]; then - set -- telegraf "$@" -fi - -exec "$@" diff --git a/scripts/generate_config.sh b/scripts/generate_config.sh new file mode 100755 index 0000000000000..c85dd05172631 --- /dev/null +++ b/scripts/generate_config.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# This script is responsible for generating the Telegraf config found under the `etc` directory. +# This script is meant to be only ran in within the Circle CI pipeline so that the Tiger Bot can update them automatically. +# It supports Windows and Linux because the configs are different depending on the OS. 
+ + +os=$1 # windows or linux +exe_path="/build/extracted" # Path will contain telegraf binary +config_name="telegraf.conf" + +if [ "$os" = "windows" ]; then + zip=$(/bin/find ./build/dist -maxdepth 1 -name "*windows_amd64.zip" -print) + exe_path="$PWD/build/extracted" + unzip "$zip" -d "$exe_path" + config_name="telegraf_windows.conf" + exe_path=$(/bin/find "$exe_path" -name telegraf.exe -type f -print) +else + tar_path=$(find /build/dist -maxdepth 1 -name "*linux_amd64.tar.gz" -print | grep -v ".*static.*") + mkdir "$exe_path" + tar --extract --file="$tar_path" --directory "$exe_path" + exe_path=$(find "$exe_path" -name telegraf -type f -print | grep ".*usr/bin/.*") +fi + +$exe_path config > $config_name + +mkdir ./new-config +mv $config_name ./new-config diff --git a/scripts/install_gotestsum.sh b/scripts/install_gotestsum.sh new file mode 100755 index 0000000000000..0b813e20879fa --- /dev/null +++ b/scripts/install_gotestsum.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -eux + +OS=$1 +EXE=$2 +VERSION="1.7.0" + +WINDOWS_SHA="7ae12ddb171375f0c14d6a09dd27a5c1d1fc72edeea674e3d6e7489a533b40c1" +DARWIN_SHA="a8e2351604882af1a67601cbeeacdcfa9b17fc2f6fbac291cf5d434efdf2d85b" +LINUX_SHA="b5c98cc408c75e76a097354d9487dca114996e821b3af29a0442aa6c9159bd40" + +setup_gotestsum () { + echo "installing gotestsum" + curl -L "https://github.com/gotestyourself/gotestsum/releases/download/v${VERSION}/gotestsum_${VERSION}_${OS}_amd64.tar.gz" --output gotestsum.tar.gz + + if [ "$OS" = "windows" ]; then + SHA=$WINDOWS_SHA + SHATOOL="sha256sum" + elif [ "$OS" = "darwin" ]; then + SHA=$DARWIN_SHA + SHATOOL="shasum --algorithm 256" + elif [ "$OS" = "linux" ]; then + SHA=$LINUX_SHA + SHATOOL="sha256sum" + fi + + if ! echo "${SHA} gotestsum.tar.gz" | ${SHATOOL} --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + + tar --extract --file=gotestsum.tar.gz "${EXE}" +} + +if test -f "${EXE}"; then + echo "gotestsum is already installed" + v=$(./"${EXE}" --version) + echo "$v is installed, required version is ${VERSION}" + if [ "$v" != "gotestsum version ${VERSION}" ]; then + setup_gotestsum + ${EXE} --version + fi +else + setup_gotestsum +fi diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh new file mode 100644 index 0000000000000..2676495d3664a --- /dev/null +++ b/scripts/installgo_mac.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -eux + +ARCH=$(uname -m) +GO_VERSION="1.17.3" +if [ "$ARCH" = 'arm64' ]; then + GO_ARCH="darwin-arm64" + GO_VERSION_SHA="ffe45ef267271b9681ca96ca9b0eb9b8598dd82f7bb95b27af3eef2461dc3d2c" # from https://golang.org/dl +elif [ "$ARCH" = 'x86_64' ]; then + GO_ARCH="darwin-amd64" + GO_VERSION_SHA="765c021e372a87ce0bc58d3670ab143008dae9305a79e9fa83440425529bb636" # from https://golang.org/dl +fi + +# This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) +path="/usr/local/Cellar" +sudo mkdir -p ${path} + +# Download Go and verify Go tarball. (Note: we aren't using brew because +# it is slow to update and we can't pull specific minor versions.) +setup_go () { + echo "installing go" + curl -L https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz + if ! 
echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum --algorithm 256 --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + + sudo rm -rf ${path}/go + sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz + sudo mkdir -p /usr/local/bin + sudo ln -sf ${path}/go/bin/go /usr/local/bin/go + sudo ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt +} + +if command -v go >/dev/null 2>&1; then + echo "Go is already installed" + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then + setup_go + go version + fi +else + setup_go +fi diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh new file mode 100644 index 0000000000000..1571daa28eecb --- /dev/null +++ b/scripts/installgo_windows.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +set -eux + +GO_VERSION="1.17.3" + +setup_go () { + choco upgrade golang --version=${GO_VERSION} + choco install make + git config --system core.longpaths true + rm -rf /c/Go + cp -r /c/Program\ Files/Go /c/ +} + +if command -v go >/dev/null 2>&1; then + echo "Go is already installed" + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then + setup_go + go version + fi +else + setup_go +fi diff --git a/scripts/local_circleci.sh b/scripts/local_circleci.sh new file mode 100755 index 0000000000000..87623713d605e --- /dev/null +++ b/scripts/local_circleci.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +jobName=$1 + +circleci config process .circleci/config.yml > process.yml +circleci local execute -c process.yml --job $jobName diff --git a/scripts/mac-signing.sh b/scripts/mac-signing.sh new file mode 100644 index 0000000000000..dc0630fc82873 --- /dev/null +++ b/scripts/mac-signing.sh @@ -0,0 +1,72 @@ +# Acquire the necessary certificates. +base64 -D -o MacCertificate.p12 <<< $MacCertificate +sudo security import MacCertificate.p12 -k /Library/Keychains/System.keychain -P $MacCertificatePassword -A +base64 -D -o AppleSigningAuthorityCertificate.cer <<< $AppleSigningAuthorityCertificate +sudo security import AppleSigningAuthorityCertificate.cer -k '/Library/Keychains/System.keychain' -A + +# Extract the built mac binary and sign it. +cd dist +tarFile=$(find . -name "*darwin_amd64.tar*") +tar -xzvf $tarFile +baseName=$(basename $tarFile .tar.gz) +cd $(find . -name "*telegraf-*" -type d) +cd usr/bin +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime telegraf +codesign -v telegraf + +# Reset back out to the main directory. +cd +cd project/dist +extractedFolder=$(find . -name "*telegraf-*" -type d) + +# Sign the 'telegraf entry' script, which is required to open Telegraf upon opening the .app bundle. +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime ../scripts/telegraf_entry_mac +codesign -v ../scripts/telegraf_entry_mac + +# Create the .app bundle. +mkdir Telegraf +cd Telegraf +mkdir Contents +cd Contents +mkdir MacOS +mkdir Resources +cd ../.. +cp ../info.plist Telegraf/Contents +cp -R "$extractedFolder"/ Telegraf/Contents/Resources +cp ../scripts/telegraf_entry_mac Telegraf/Contents/MacOS +cp ../assets/icon.icns Telegraf/Contents/Resources +chmod +x Telegraf/Contents/MacOS/telegraf_entry_mac +mv Telegraf Telegraf.app + +# Sign the entire .app bundle, and wrap it in a DMG. +codesign -s "Developer ID Application: InfluxData Inc. 
(M7DN9H35QT)" --timestamp --options=runtime --deep --force Telegraf.app +hdiutil create -size 500m -volname Telegraf -srcfolder Telegraf.app "$baseName".dmg +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime "$baseName".dmg + +# Send the DMG to be notarized. +uuid=$(xcrun altool --notarize-app --primary-bundle-id "com.influxdata.telegraf" --username "$AppleUsername" --password "$ApplePassword" --file "$baseName".dmg | awk '/RequestUUID/ { print $NF; }') +echo $uuid +if [[ $uuid == "" ]]; then + echo "Could not upload for notarization." + exit 1 +fi + +# Wait until the status returns something other than 'in progress'. +request_status="in progress" +while [[ "$request_status" == "in progress" ]]; do + sleep 10 + request_status=$(xcrun altool --notarization-info $uuid --username "$AppleUsername" --password "$ApplePassword" 2>&1 | awk -F ': ' '/Status:/ { print $2; }' ) +done + +if [[ $request_status != "success" ]]; then + echo "Failed to notarize." + exit 1 +fi + +# Attach the notarization to the DMG. +xcrun stapler staple "$baseName".dmg +rm -rf Telegraf.app +rm -rf $extractedFolder +ls + +echo "Signed and notarized!" diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100644 index cf29b5c23a0e8..0000000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,179 +0,0 @@ -#!/bin/sh -# -# usage: release.sh BUILD_NUM -# -# Requirements: -# - curl -# - jq -# - sha256sum -# - awscli -# - gpg -# -# CIRCLE_TOKEN set to a CircleCI API token that can list the artifacts. -# -# AWS cli setup to be able to write to the BUCKET. -# -# GPG setup with a signing key. - -BUILD_NUM="${1:?usage: release.sh BUILD_NUM}" -BUCKET="${2:-dl.influxdata.com/telegraf/releases}" - -: ${CIRCLE_TOKEN:?"Must set CIRCLE_TOKEN"} - -tmpdir="$(mktemp -d -t telegraf.XXXXXXXXXX)" - -on_exit() { - rm -rf "$tmpdir" -} -trap on_exit EXIT - -echo "${tmpdir}" -cd "${tmpdir}" || exit 1 - -curl -s -S -L -H Circle-Token:${CIRCLE_TOKEN} \ - "https://circleci.com/api/v2/project/gh/influxdata/telegraf/${BUILD_NUM}/artifacts" \ - -o artifacts || exit 1 - -cat artifacts | jq -r '.items[] | "\(.url) \(.path|ltrimstr("build/dist/"))"' > manifest - -while read url path; -do - echo $url - curl -s -S -L -o "$path" "$url" && - sha256sum "$path" > "$path.DIGESTS" && - gpg --armor --detach-sign "$path.DIGESTS" && - gpg --armor --detach-sign "$path" || exit 1 -done < manifest - -echo -cat *.DIGESTS -echo - -arch() { - case ${1} in - *i386.*) - echo i386;; - *armel.*) - echo armel;; - *armv6hl.*) - echo armv6hl;; - *armhf.*) - echo armhf;; - *arm64.* | *aarch64.*) - echo arm64;; - *amd64.* | *x86_64.*) - echo amd64;; - *s390x.*) - echo s390x;; - *mipsel.*) - echo mipsel;; - *mips.*) - echo mips;; - *) - echo unknown - esac -} - -platform() { - case ${1} in - *".rpm") - echo Centos;; - *".deb") - echo Debian;; - *"linux"*) - echo Linux;; - *"freebsd"*) - echo FreeBSD;; - *"darwin"*) - echo Mac OS X;; - *"windows"*) - echo Windows;; - *) - echo unknown;; - esac -} - -echo "Arch | Platform | Package | SHA256" -echo "---| --- | --- | ---" -while read url path; -do - echo "$(arch ${path}) | $(platform ${path}) | [\`${path}\`](https://dl.influxdata.com/telegraf/releases/${path}) | \`$(sha256sum ${path} | cut -f1 -d' ')\`" -done < manifest -echo "" - -package="$(grep *_amd64.deb manifest | cut -f2 -d' ')" -cat -<